/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Generic epilogue for implementing certain kinds of fused epilogue behavior. 
*/ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/matrix_coord.h" #include "cutlass/semaphore.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////////////////////////// class EpilogueFusedVisitorConcept { public: static int const kIterations = 1; static int const kElementsPerAccess = 4; using ElementOutput = float; using ElementAccumulator = float; using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>; /// Arguments structure struct Arguments { }; /// Params structure struct Params { Params() { } Params(Arguments const &args) { } }; /// Shared storage struct SharedStorage { }; public: CUTLASS_DEVICE EpilogueFusedVisitorConcept( Params const &params, ///< Parameters routed to the epilogue SharedStorage &shared_storage, ///< Shared storage needed by the functors here MatrixCoord const &problem_size, ///< Problem size of the output int thread_idx, ///< Thread index within the threadblock int warp_idx, ///< Warp index within the threadblock int lane_idx, ///< Lane index within the warp MatrixCoord const &threadblock_offset = MatrixCoord(0, 0)) { ///< Coordinate } /// Helper to indicate split-K behavior CUTLASS_DEVICE void set_k_partition( int split_k_index, ///< Index of this threadblock within split-K partitioned scheme int split_k_slices) { ///< Total number of split-K slices } /// Called to set the batch index CUTLASS_DEVICE void set_batch_index(int batch_idx) { } /// Called at the start of the epilogue just before iterating over accumulator slices CUTLASS_DEVICE void begin_epilogue() { } /// Called at the start of one step before starting accumulator exchange CUTLASS_DEVICE void begin_step(int step_idx) { } /// Called at the start of a row CUTLASS_DEVICE void begin_row(int row_idx) { } /// Called after accumulators have been exchanged for each accumulator vector CUTLASS_DEVICE void visit( int iter_idx, int row_idx, int column_idx, int frag_idx, AccumulatorFragment const &accum) { } /// Called at the end of a row CUTLASS_DEVICE void end_row(int row_idx) { } /// Called after all accumulator elements have been visited CUTLASS_DEVICE void end_step(int step_idx) { } /// Called after all steps have been completed CUTLASS_DEVICE void end_epilogue() { } }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename Visitor_, ///< Functor containing fused operations (satisfies EpilogueFusedVisitorConcept) typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size 
when epilogue op is large (true || !IsEpilogueFunctorHeavy<Visitor_>::value) > class EpilogueWithVisitor : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition> { public: using Visitor = Visitor_; using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename Base::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output access size static int const kElementsPerAccess = Visitor::kElementsPerAccess; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Array type used by output functor using AccumulatorAccessType = Array< typename WarpTileIterator::Element, kElementsPerAccess>; /// Number of warps using WarpCount = typename Base::WarpCount; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles; using SharedStorage = typename Base::SharedStorage; private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; public: /// Constructor CUTLASS_DEVICE EpilogueWithVisitor( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): Base(shared_storage, thread_idx, warp_idx, lane_idx), shared_load_iterator_(shared_storage.reference(), thread_idx) { } /// Streams the result to global memory CUTLASS_DEVICE void operator()( Visitor & visitor, AccumulatorTile const &accumulators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) visitor.begin_epilogue(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? 
Visitor::kIterations : 1) for (int iter_idx = 0; iter_idx < Visitor::kIterations; ++iter_idx) { // // Load the source // visitor.begin_step(iter_idx); // // Convert and store fragment // __syncthreads(); acc2smem_source_needed<cutlass::make_index_sequence<Visitor::kIterations>>::push( iter_idx, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Iterate over output fragments // AccumulatorAccessType const *accum_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment[0]); int const kAccumulatorFragmentCount = AccumulatorTile::kElements / (Visitor::kIterations * AccumulatorAccessType::kElements); CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) { int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn; int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn; // Start a new row of the output fragment if (!col_idx) { visitor.begin_row(row_idx); } visitor.visit( iter_idx, row_idx, col_idx, idx, accum_frag_ptr[idx] ); // End the row of the output fragment if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) { visitor.end_row(row_idx); } } // // Conclude the step // visitor.end_step(iter_idx); } visitor.end_epilogue(); } private: template<class Seq> struct acc2smem_source_needed; template <size_t... 
Seq> struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to create an EpilogueWithVisitor from an existing epilogue template <typename Visitor_, typename Existing_, bool IterationsUnroll = true> struct EpilogueWithVisitorFromExistingEpilogue { using Epilogue = EpilogueWithVisitor< Visitor_, typename Existing_::Shape, typename Existing_::WarpMmaOperator, Existing_::kPartitionsK, typename Existing_::AccumulatorFragmentIterator, typename Existing_::WarpTileIterator, typename Existing_::SharedLoadIterator, typename Existing_::Padding, Existing_::kFragmentsPerIteration, IterationsUnroll >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_with_visitor.h", "repo_id": "cutlass", "token_count": 4847 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/permute.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" #include "cutlass/conv/conv2d_problem_size.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: PitchLinearThreadMap) typename Element_, ///< Element data type typename ThreadOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>, typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > class PredicatedTileIteratorDirectConv { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using ThreadOutputShape = ThreadOutputShape_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; using ConvProblemSize = typename cutlass::conv::Conv2dProblemSize; /// Fragment object using Fragment = Array<Element, ThreadMap::Iterations::kCount * kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, kElementsPerAccess>; static int const kLoadsPerAccess = AccessType::kElements / AccessType::kElements; using ThreadTileCount = MatrixShape< ThreadBlockOutputShape::kH / ThreadOutputShape::kH, ThreadBlockOutputShape::kW / ThreadOutputShape::kW >; // // Parameters struct // /// Uses a non-template class struct Params : PredicatedTileIteratorDirect2dConvParams { using Base = PredicatedTileIteratorDirect2dConvParams; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout, cutlass::conv::Conv2dProblemSize const &problem_size): PredicatedTileIteratorDirect2dConvParams( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, problem_size, {ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW} ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kContiguous; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
PredicatedTileIteratorDirect2dConvParams params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Element *pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// Extent of the matrix tile in rows Index extent_column_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// A thread's starting column Index thread_start_column_; /// Initial thread output location int thread_start_n_, thread_start_p_, thread_start_q_; /// Current threadblock tile index int tile_index_; // // Static asserts about internal strides // static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); static_assert(sizeof(PredicatedTileIteratorDirect2dConvParams::stride) == 8, "Expected 64b strides"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIteratorDirectConv( PredicatedTileIteratorDirect2dConvParams const & params, Element *pointer, TensorCoord extent, int thread_idx, TensorCoord threadblock_offset = TensorCoord() ): params_(params), pointer_(pointer) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); extent_row_ = extent.row(); extent_column_ = extent.column(); // stride dim (PQ) thread_start_row_ = thread_offset.column(); // contiguous dim (Channels) thread_start_column_ = threadblock_offset.column() + thread_offset.row(); tile_index_ = threadblock_offset.row(); set_tile_index(0); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void set_tile_index(const int index) { int residual; params_.pq_divmod(thread_start_n_, residual, tile_index_ + index); params_.q_divmod(thread_start_p_, thread_start_q_, residual); // Compute the base output coord of ThreadBlock thread_start_p_ *= ThreadBlockOutputShape::kH; thread_start_q_ *= ThreadBlockOutputShape::kW; // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { mask_.predicates[c] = ((thread_start_column_ + c * ThreadMap::Delta::kContiguous) < extent_column_); } // Null pointer performs no accesses if (!pointer_) { mask_.clear(); } } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c; int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided; int p = current_row / ThreadBlockOutputShape::kW; int q = current_row % ThreadBlockOutputShape::kW; int current_p = thread_start_p_ + p; int current_q = thread_start_q_ + q; bool row_guard = (current_p) < params_.P && (current_q) < params_.Q && (thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided; int output_row_offset = thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q; uint8_t *byte_pointer = reinterpret_cast<uint8_t *>(pointer_) + LongIndex(output_row_offset) * LongIndex(params_.stride) + LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) * sizeof(AccessType) / kElementsPerAccess; AccessType *frag_ptr = 
reinterpret_cast<AccessType *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); bool guard = row_guard && mask_.predicates[c]; cutlass::arch::global_load<AccessType, sizeof(AccessType)>( frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard); } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c; int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided; int p = current_row / ThreadBlockOutputShape::kW; int q = current_row % ThreadBlockOutputShape::kW; int current_p = thread_start_p_ + p; int current_q = thread_start_q_ + q; bool row_guard = (current_p) < params_.P && (current_q) < params_.Q && (thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided; int output_row_offset = thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q; uint8_t *byte_pointer = reinterpret_cast<uint8_t *>(pointer_) + LongIndex(output_row_offset) * LongIndex(params_.stride) + LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) * sizeof(AccessType) / kElementsPerAccess; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); bool guard = row_guard && mask_.predicates[c]; cutlass::arch::global_store<AccessType, sizeof(AccessType)>( frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard); } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } CUTLASS_DEVICE MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_row() const { return thread_start_row_; } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_column() const { return thread_start_column_; } /// Extent of the matrix in rows CUTLASS_DEVICE Index extent_row() const { return extent_row_; } /// Extent of the matrix in columns CUTLASS_DEVICE Index extent_column() const { return extent_column_; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIteratorDirectConv &operator++() { // do nothing return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) const { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h", "repo_id": "cutlass", "token_count": 4873 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "cutlass/array.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/epilogue/warp/tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element, ///< data type of element to be written typename Layout ///< target shared memory layout > class TileIteratorTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element_ ///< data type of element to be written > class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = layout::RowMajor; using TensorLayout = Layout; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; /// Number of times this iterator can be incremented using TileIterations = typename Policy::TileIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerAccess>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOp(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOp( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / Policy::kElementsPerAccess) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column() / Policy::kElementsPerAccess}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / Policy::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += layout_({ coord_offset.row(), coord_offset.column() / Policy::kElementsPerAccess }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n]; } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { frag_ptr[n] = pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess]; } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator++() { return add_tile_offset({1, 0}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix 
multiply operation shape (concept: gemm::GemmShape) typename Element_, ///< data type of element to be written int InterleavedK ///< number of interleaved k > class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_, layout::ColumnMajorInterleaved<InterleavedK> > { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = layout::ColumnMajorInterleaved<InterleavedK>; using TensorLayout = Layout; ///< shared memory tensor ref layout using TensorRef = TensorRef<Element, TensorLayout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< // Policy::kRowsPerIteration, WarpShape::kM, InterleavedK >; /// This is the fragment size produced by one tile using Fragment = Array< Element, Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction * Policy::kElementsPerIteration>; /// This is the fragment size produced by one iteration // using Fragment = Array< // Element, Policy::kElementsPerIteration >; /// This is the complete warp-level accumulator tile. //using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented using TileIterations = typename Policy::TileIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerIteration>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object TensorLayout layout_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOp(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOp( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerIteration }; pointer_ += (layout_({thread_offset_.row(), thread_offset_.column()}) / Policy::kElementsPerAccess); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / Policy::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += (layout_({ coord_offset.row(), coord_offset.column() }) / Policy::kElementsPerAccess); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int 
n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) { AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int a = 0; a < Policy::kAccessPerIteration; ++a) { ptr[a + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n * Policy::kAccessPerIteration + a]; // printf("store thread %d, address %p, bank %ld\n", threadIdx.x, pointer_+a+n*Detail::kLanesInQuad, // ((long long)(pointer_+a+n*Detail::kLanesInQuad)>>2)&0x1f); } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) { AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int a = 0; a < Policy::kAccessPerIteration; ++a) { frag_ptr[n * Policy::kAccessPerIteration + a] = ptr[a + pointer_offset / Policy::kElementsPerAccess]; } } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator++() { return add_tile_offset({0, 1}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element_, ///< data type of element to be written typename Layout_ > class TileIteratorTensorOpCanonical { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = Layout_; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; static int const kAccessSize = 1; static int const kAccessCount = Policy::kElementsPerAccess / kAccessSize; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerAccess>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, kAccessSize>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Guard to indicate whether the shape is divisible bool divisible_; /// Extent of the output tensor MatrixCoord extent_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]), divisible_(true), extent_(WarpShape::kM, WarpShape::kN) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column()}); } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical( TensorRef const &ref, TensorCoord const &extent, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]), divisible_(false), extent_(extent) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column()}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += layout_({ coord_offset.row(), coord_offset.column() }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { CUTLASS_PRAGMA_UNROLL for (int a = 0; a < kAccessCount; ++a) { int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a; int frag_idx = n * kAccessCount + a; int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a; if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) { pointer_[ptr_idx] = frag_ptr[frag_idx]; } } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType 
*frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { CUTLASS_PRAGMA_UNROLL for (int a = 0; a < kAccessCount; ++a) { int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a; int frag_idx = n * kAccessCount + a; int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a; if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) { frag_ptr[frag_idx] = pointer_[ptr_idx]; } } } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & operator++() { return add_tile_offset({1, 0}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
{ "file_path": "cutlass/include/cutlass/epilogue/warp/tile_iterator_tensor_op.h", "repo_id": "cutlass", "token_count": 6931 }
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cute/algorithm/functional.hpp" #include "cute/atom/mma_atom.hpp" #include "cute/algorithm/gemm.hpp" #include "cute/tensor_predicate.hpp" #include "cute/numeric/arithmetic_tuple.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { using namespace cute; ///////////////////////////////////////////////////////////////////////////////////////////////// template < int Stages, class TileShape_, class ElementA_, class StrideA_, class ElementB_, class StrideB_, class TiledMma_, class GmemTiledCopyA_, class SmemLayoutAtomA_, class SmemCopyAtomA_, class TransformA_, class GmemTiledCopyB_, class SmemLayoutAtomB_, class SmemCopyAtomB_, class TransformB_> struct CollectiveMma< MainloopSm80CpAsyncUnpredicated<Stages>, TileShape_, ElementA_, StrideA_, ElementB_, StrideB_, TiledMma_, GmemTiledCopyA_, SmemLayoutAtomA_, SmemCopyAtomA_, TransformA_, GmemTiledCopyB_, SmemLayoutAtomB_, SmemCopyAtomB_, TransformB_ > { // // Type Aliases // using DispatchPolicy = MainloopSm80CpAsyncUnpredicated<Stages>; using TileShape = TileShape_; using ElementA = ElementA_; using StrideA = StrideA_; using ElementB = ElementB_; using StrideB = StrideB_; using TiledMma = TiledMma_; using ElementAccumulator = typename TiledMma::ValTypeC; using GmemTiledCopyA = GmemTiledCopyA_; using GmemTiledCopyB = GmemTiledCopyB_; using SmemLayoutAtomA = SmemLayoutAtomA_; using SmemLayoutAtomB = SmemLayoutAtomB_; using SmemCopyAtomA = SmemCopyAtomA_; using SmemCopyAtomB = SmemCopyAtomB_; using TransformA = TransformA_; using TransformB = TransformB_; using ArchTag = typename DispatchPolicy::ArchTag; 
// Follow the change in TestSmall: TileShape switch to CtaShape // For sm80 arch, CtaShape should euqal to TileShape using CtaShape_MNK = TileShape; static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); using SmemLayoutA = decltype(tile_to_shape( SmemLayoutAtomA{}, make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); using SmemLayoutB = decltype(tile_to_shape( SmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); static_assert(DispatchPolicy::Stages >= 2, "CpAsync mainloop must have at least 2 stages in the pipeline."); struct SharedStorage { cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a; cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b; }; // Host side kernel arguments struct Arguments { ElementA const* ptr_A; StrideA dA; ElementB const* ptr_B; StrideB dB; }; // Device side kernel params using Params = Arguments; // // Methods // CollectiveMma() = default; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) { (void) workspace; return args; } /// Perform a collective-scoped matrix multiply-accumulate template < class FrgTensorD, class TensorA, class TensorB, class FrgTensorC, class KTileIterator, class ResidueMNK > CUTLASS_DEVICE void operator() ( FrgTensorD &accum, TensorA gA, TensorB gB, FrgTensorC const &src_accum, KTileIterator k_tile_iter, int k_tile_count, ResidueMNK residue_mnk, int thread_idx, char *smem_buf) { using namespace cute; static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident."); static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident."); static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident."); static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident."); static_assert(cute::rank(SmemLayoutA{}) == 3, "MainloopSm80CpAsync must have a pipeline mode in the smem layout."); static_assert(cute::rank(SmemLayoutB{}) == 3, "MainloopSm80CpAsync must have a pipeline mode in the smem layout."); // Construct shared memory tiles SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf); Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) CUTE_STATIC_ASSERT_V(size<0>(gA) == size<0>(sA)); // BLK_M CUTE_STATIC_ASSERT_V(size<1>(gA) == size<1>(sA)); // BLK_K CUTE_STATIC_ASSERT_V(size<0>(gB) == size<0>(sB)); // BLK_N CUTE_STATIC_ASSERT_V(size<1>(gB) == size<1>(sB)); // BLK_K CUTE_STATIC_ASSERT_V(size<1>(sA) == size<1>(sB)); // BLK_K CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE // Partition the copying of A and B tiles across the threads 
GmemTiledCopyA gmem_tiled_copy_A; GmemTiledCopyB gmem_tiled_copy_B; auto gmem_thr_copy_A = gmem_tiled_copy_A.get_slice(thread_idx); auto gmem_thr_copy_B = gmem_tiled_copy_B.get_slice(thread_idx); Tensor tAgA = gmem_thr_copy_A.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k) Tensor tAsA = gmem_thr_copy_A.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE) Tensor tBgB = gmem_thr_copy_B.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k) Tensor tBsB = gmem_thr_copy_B.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE) // // PREDICATES // (void) residue_mnk; //assert(residue_mnk == make_tuple(0,0,0)); // // PREFETCH // // Start async loads for all pipes but the last CUTLASS_PRAGMA_UNROLL for (int k_pipe = 0; k_pipe < DispatchPolicy::Stages-1; ++k_pipe) { copy(gmem_tiled_copy_A, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,k_pipe)); copy(gmem_tiled_copy_B, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,k_pipe)); cp_async_fence(); --k_tile_count; if (k_tile_count > 0) { ++k_tile_iter; } } // // MMA Atom partitioning // // Tile MMA compute thread partitions and allocate accumulators TiledMma tiled_mma; auto thr_mma = tiled_mma.get_thread_slice(thread_idx); Tensor tCrA = thr_mma.partition_fragment_A(sA(_,_,0)); // (MMA,MMA_M,MMA_K) Tensor tCrB = thr_mma.partition_fragment_B(sB(_,_,0)); // (MMA,MMA_N,MMA_K) CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K CUTE_STATIC_ASSERT_V(size(gmem_tiled_copy_A) == size(tiled_mma)); CUTE_STATIC_ASSERT_V(size(gmem_tiled_copy_B) == size(tiled_mma)); // // Copy Atom retiling // auto smem_tiled_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma); auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx); Tensor tCsA = smem_thr_copy_A.partition_S(sA); // (CPY,CPY_M,CPY_K,PIPE) Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K) CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K auto smem_tiled_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma); auto smem_thr_copy_B = smem_tiled_copy_B.get_thread_slice(thread_idx); Tensor tCsB = smem_thr_copy_B.partition_S(sB); // (CPY,CPY_N,CPY_K,PIPE) Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); // (CPY,CPY_N,CPY_K) CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // CPY_N CUTE_STATIC_ASSERT_V(size<2>(tCsB) == size<2>(tCrB_copy_view)); // CPY_K // // PIPELINED MAIN LOOP // // Current pipe index in smem to read from int smem_pipe_read = 0; // Current pipe index in smem to write to int smem_pipe_write = DispatchPolicy::Stages-1; Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read); Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read); // Size of the register pipeline auto K_BLOCK_MAX = size<2>(tCrA); // PREFETCH register pipeline if (K_BLOCK_MAX > 1) { // Wait until our first prefetched tile is loaded in cp_async_wait<DispatchPolicy::Stages-2>(); __syncthreads(); // Prefetch the first rmem from the first k-tile copy(smem_tiled_copy_A, tCsA_p(_,_,Int<0>{}), tCrA_copy_view(_,_,Int<0>{})); copy(smem_tiled_copy_B, tCsB_p(_,_,Int<0>{}), tCrB_copy_view(_,_,Int<0>{})); } CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > -(DispatchPolicy::Stages-1); --k_tile_count) { // Pipeline the outer products with a static for loop. 
// // Note, the for_each() function is required here to ensure `k_block` is of type Int<x>. for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block) { if (k_block == K_BLOCK_MAX - 1) { // Slice the smem_pipe_read smem tCsA_p = tCsA(_,_,_,smem_pipe_read); tCsB_p = tCsB(_,_,_,smem_pipe_read); // Commit the smem for smem_pipe_read cp_async_wait<DispatchPolicy::Stages-2>(); __syncthreads(); } // Load A, B shmem->regs for k_block+1 auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static copy(smem_tiled_copy_A, tCsA_p(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next)); copy(smem_tiled_copy_B, tCsB_p(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next)); // Copy gmem to smem before computing gemm on each k-pipe if (k_block == 0) { copy(gmem_tiled_copy_A, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write)); copy(gmem_tiled_copy_B, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write)); cp_async_fence(); if (k_tile_count > 0) { ++k_tile_iter; } // Advance the pipe -- Doing it here accounts for K_BLOCK_MAX = 1 (no rmem pipe) smem_pipe_write = smem_pipe_read; ++smem_pipe_read; smem_pipe_read = (smem_pipe_read == DispatchPolicy::Stages) ? 0 : smem_pipe_read; } // Transform before compute cute::transform(tCrA(_,_,k_block), TransformA{}); cute::transform(tCrB(_,_,k_block), TransformB{}); // Thread-level register gemm for k_block cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum); }); } cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < int Stages, class TileShape_, class ElementA_, class StrideA_, class ElementB_, class StrideB_, class TiledMma_, class GmemTiledCopyA_, class SmemLayoutAtomA_, class SmemCopyAtomA_, class TransformA_, class GmemTiledCopyB_, class SmemLayoutAtomB_, class SmemCopyAtomB_, class TransformB_ > struct CollectiveMma< MainloopSm80CpAsync<Stages>, TileShape_, ElementA_, StrideA_, ElementB_, StrideB_, TiledMma_, GmemTiledCopyA_, SmemLayoutAtomA_, SmemCopyAtomA_, TransformA_, GmemTiledCopyB_, SmemLayoutAtomB_, SmemCopyAtomB_, TransformB_ > { // // Type Aliases // using DispatchPolicy = MainloopSm80CpAsync<Stages>; using TileShape = TileShape_; // Follow the change in TestSmall: TileShape switch to CtaShape // In legacy arch, it should be same using CtaShape_MNK = TileShape; using ElementA = ElementA_; using StrideA = StrideA_; using ElementB = ElementB_; using StrideB = StrideB_; using TiledMma = TiledMma_; using ElementAccumulator = typename TiledMma::ValTypeC; using GmemTiledCopyA = GmemTiledCopyA_; using GmemTiledCopyB = GmemTiledCopyB_; using SmemLayoutAtomA = SmemLayoutAtomA_; using SmemLayoutAtomB = SmemLayoutAtomB_; using SmemCopyAtomA = SmemCopyAtomA_; using SmemCopyAtomB = SmemCopyAtomB_; using TransformA = TransformA_; using TransformB = TransformB_; using ArchTag = typename DispatchPolicy::ArchTag; static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom 
must evenly divide tile shape."); using SmemLayoutA = decltype(tile_to_shape( SmemLayoutAtomA{}, make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); using SmemLayoutB = decltype(tile_to_shape( SmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); static_assert(DispatchPolicy::Stages >= 2, "CpAsync mainloop must have at least 2 stages in the pipeline."); struct SharedStorage { cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a; cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b; }; // Host side kernel arguments struct Arguments { ElementA const* ptr_A; StrideA dA; ElementB const* ptr_B; StrideB dB; }; // Device side kernel params using Params = Arguments; // // Methods // CollectiveMma() = default; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) { (void) workspace; return args; } /// Perform a collective-scoped matrix multiply-accumulate template < class FrgTensorD, class TensorA, class TensorB, class FrgTensorC, class KTileIterator, class ResidueMNK > CUTLASS_DEVICE void operator() ( FrgTensorD &accum, TensorA gA, // (BLK_M, BLK_K, K_TILES) TensorB gB, // (BLK_N, BLK_K, K_TILES) FrgTensorC const &src_accum, KTileIterator k_tile_iter, int k_tile_count, ResidueMNK residue_mnk, int thread_idx, char *smem_buf) { using namespace cute; static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident."); static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident."); static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident."); static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident."); static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3."); static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3."); // Construct shared memory tiles SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf); Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) CUTE_STATIC_ASSERT_V(size<0>(gA) == size<0>(sA)); // BLK_M CUTE_STATIC_ASSERT_V(size<1>(gA) == size<1>(sA)); // BLK_K CUTE_STATIC_ASSERT_V(size<0>(gB) == size<0>(sB)); // BLK_N CUTE_STATIC_ASSERT_V(size<1>(gB) == size<1>(sB)); // BLK_K CUTE_STATIC_ASSERT_V(size<1>(sA) == size<1>(sB)); // BLK_K CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE // Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k) // This aligns the tensor with BLK_K for all but the 0th k_tile gA.data() = &gA(0, get<2>(residue_mnk), 0); gB.data() = &gB(0, get<2>(residue_mnk), 0); // Partition the copying of A and B tiles across the threads GmemTiledCopyA gmem_tiled_copy_A; GmemTiledCopyB gmem_tiled_copy_B; auto gmem_thr_copy_A = gmem_tiled_copy_A.get_slice(thread_idx); auto gmem_thr_copy_B = gmem_tiled_copy_B.get_slice(thread_idx); Tensor tAgA = gmem_thr_copy_A.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k) Tensor tAsA = gmem_thr_copy_A.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE) Tensor tBgB = gmem_thr_copy_B.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k) Tensor tBsB = gmem_thr_copy_B.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE) // // PREDICATES // // Allocate predicate tensors for m and n Tensor tApA = 
make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{}); Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{}); // Construct identity layout for sA and sB Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k) Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k) // Repeat the partitioning with identity layouts Tensor tAcA = gmem_thr_copy_A.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k) Tensor tBcB = gmem_thr_copy_B.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k) // Set predicates for m bounds CUTLASS_PRAGMA_UNROLL for (int m = 0; m < size<0>(tApA); ++m) { tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m } // Set predicates for n bounds CUTLASS_PRAGMA_UNROLL for (int n = 0; n < size<0>(tBpB); ++n) { tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n } // // PREFETCH // // Clear the smem tiles to account for predicated off loads clear(tAsA); clear(tBsB); // Start async loads for 0th k-tile, where we take care of the k residue { constexpr int k_pipe = 0; Tensor tAgAk = tAgA(_,_,_,*k_tile_iter); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < size<2>(tAsA); ++k) { if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted) copy_if(gmem_tiled_copy_A, tApA(_,k), tAgAk(_,_,k), tAsA(_,_,k,k_pipe)); } } Tensor tBgBk = tBgB(_,_,_,*k_tile_iter); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < size<2>(tBsB); ++k) { if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted) copy_if(gmem_tiled_copy_B, tBpB(_,k), tBgBk(_,_,k), tBsB(_,_,k,k_pipe)); } } cp_async_fence(); ++k_tile_iter; --k_tile_count; } // Start async loads for 1st k-tile onwards, no k-residue handling needed CUTLASS_PRAGMA_UNROLL for (int k_pipe = 1; k_pipe < DispatchPolicy::Stages-1; ++k_pipe) { if (k_tile_count <= 0) { clear(tApA); clear(tBpB); } copy_if(gmem_tiled_copy_A, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,k_pipe)); // CpAsync copy_if(gmem_tiled_copy_B, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,k_pipe)); // CpAsync cp_async_fence(); ++k_tile_iter; --k_tile_count; } // // MMA Atom partitioning // // Tile MMA compute thread partitions and allocate accumulators TiledMma tiled_mma; auto thr_mma = tiled_mma.get_thread_slice(thread_idx); Tensor tCrA = thr_mma.partition_fragment_A(sA(_,_,0)); // (MMA,MMA_M,MMA_K) Tensor tCrB = thr_mma.partition_fragment_B(sB(_,_,0)); // (MMA,MMA_N,MMA_K) CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K // // Copy Atom retiling // auto smem_tiled_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma); auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx); Tensor tCsA = smem_thr_copy_A.partition_S(sA); // (CPY,CPY_M,CPY_K,PIPE) Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K) CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K auto smem_tiled_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma); auto smem_thr_copy_B = smem_tiled_copy_B.get_thread_slice(thread_idx); Tensor tCsB = 
smem_thr_copy_B.partition_S(sB); // (CPY,CPY_N,CPY_K,PIPE) Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); // (CPY,CPY_N,CPY_K) CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // CPY_N CUTE_STATIC_ASSERT_V(size<2>(tCsB) == size<2>(tCrB_copy_view)); // CPY_K // // PIPELINED MAIN LOOP // // Current pipe index in smem to read from int smem_pipe_read = 0; // Current pipe index in smem to write to int smem_pipe_write = DispatchPolicy::Stages-1; Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read); Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read); // Size of the register pipeline auto K_BLOCK_MAX = size<2>(tCrA); // PREFETCH register pipeline if (K_BLOCK_MAX > 1) { // Wait until our first prefetched tile is loaded in cp_async_wait<DispatchPolicy::Stages-2>(); __syncthreads(); // Prefetch the first rmem from the first k-tile copy(smem_tiled_copy_A, tCsA_p(_,_,Int<0>{}), tCrA_copy_view(_,_,Int<0>{})); copy(smem_tiled_copy_B, tCsB_p(_,_,Int<0>{}), tCrB_copy_view(_,_,Int<0>{})); } CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > -(DispatchPolicy::Stages-1); --k_tile_count) { // Pipeline the outer products with a static for loop. // // Note, the for_each() function is required here to ensure `k_block` is of type Int<N>. for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block) { if (k_block == K_BLOCK_MAX - 1) { // Slice the smem_pipe_read smem tCsA_p = tCsA(_,_,_,smem_pipe_read); tCsB_p = tCsB(_,_,_,smem_pipe_read); // Commit the smem for smem_pipe_read cp_async_wait<DispatchPolicy::Stages-2>(); __syncthreads(); } // Load A, B shmem->regs for k_block+1 auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static copy(smem_tiled_copy_A, tCsA_p(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next)); copy(smem_tiled_copy_B, tCsB_p(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next)); // Copy gmem to smem before computing gemm on each k-pipe if (k_block == 0) { // Set all predicates to false if we are going to overshoot bounds if (k_tile_count <= 0) { clear(tApA); clear(tBpB); } copy_if(gmem_tiled_copy_A, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write)); copy_if(gmem_tiled_copy_B, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write)); cp_async_fence(); ++k_tile_iter; // Advance the pipe -- Doing it here accounts for K_BLOCK_MAX = 1 (no rmem pipe) smem_pipe_write = smem_pipe_read; ++smem_pipe_read; smem_pipe_read = (smem_pipe_read == DispatchPolicy::Stages) ? 0 : smem_pipe_read; } // Transform before compute cute::transform(tCrA(_,_,k_block), TransformA{}); cute::transform(tCrB(_,_,k_block), TransformB{}); // Thread-level register gemm for k_block cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum); }); } cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::collective /////////////////////////////////////////////////////////////////////////////////////////////////
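The mainloops above keep DispatchPolicy::Stages shared-memory buffers in flight: the prologue issues cp.async loads for Stages-1 of them, then each k-tile iteration writes into `smem_pipe_write` while the MMAs drain `smem_pipe_read`, which wraps modulo Stages. Below is a minimal host-side model of that circular schedule, not CUTLASS code; the stage count, tile count, and the "load"/"compute" printouts are hypothetical stand-ins for the cp.async copies and warp-level MMAs.

#include <cstdio>

int main() {
  constexpr int Stages = 3;     // stands in for DispatchPolicy::Stages (example value)
  int k_tile_count = 7;         // number of k-tiles in the problem (example value)
  int k_tile_iter  = 0;         // models *k_tile_iter

  // Prologue: start async loads for all pipes but the last.
  for (int k_pipe = 0; k_pipe < Stages - 1; ++k_pipe) {
    std::printf("prologue: load k-tile %d -> smem pipe %d\n", k_tile_iter, k_pipe);
    --k_tile_count;
    if (k_tile_count > 0) { ++k_tile_iter; }
  }

  int smem_pipe_read  = 0;
  int smem_pipe_write = Stages - 1;

  // Mainloop: keeps issuing loads ahead of compute until the pipeline drains.
  for (; k_tile_count > -(Stages - 1); --k_tile_count) {
    if (k_tile_count > 0) {
      std::printf("mainloop: load k-tile %d -> smem pipe %d\n", k_tile_iter, smem_pipe_write);
      ++k_tile_iter;
    }
    std::printf("mainloop: compute on smem pipe %d\n", smem_pipe_read);

    // Advance the circular pipe indices the same way the kernels above do.
    smem_pipe_write = smem_pipe_read;
    smem_pipe_read  = (smem_pipe_read + 1 == Stages) ? 0 : smem_pipe_read + 1;
  }
  return 0;
}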
cutlass/include/cutlass/gemm/collective/sm80_mma_multistage.hpp
{ "file_path": "cutlass/include/cutlass/gemm/collective/sm80_mma_multistage.hpp", "repo_id": "cutlass", "token_count": 13161 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined RankK kernel. Does not compute batching or support split-K. 
*/ #pragma once #include "cutlass/blas3.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/rank_k_universal.h" #include "cutlass/gemm/kernel/default_rank_k_universal.h" #include "cutlass/gemm/device/default_gemm_configuration.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassTensorOp, /// Tag indicating architecture to tune for typename ArchTag_ = arch::Sm80, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = typename threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::kAlignmentA, /// If true, kernel supports split-K with serial reduction bool SplitKSerial = false, /// Operation performed by SYRK typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::Operator, /// Complex elementwise transformation ComplexTransform TransformA = ComplexTransform::kNone, /// Blas3 computation mode (symmetric/hermitian) BlasMode BlasMode_ = BlasMode::kSymmetric> class RankK { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using ElementC = ElementC_; using LayoutC = LayoutC_; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static FillMode const kFillModeC = FillModeC; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentC = EpilogueOutputOp::kCount; static 
bool const kSplitKSerial = SplitKSerial; static ComplexTransform const kTransformA = TransformA; static BlasMode const kBlasMode = BlasMode_; static int const kUpdateRank = 1; /// Define the kernel using RankKkernel = typename kernel::DefaultRankKUniversal< ElementA, LayoutA, kTransformA, kAlignmentA, ElementC, LayoutC, kFillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, kSplitKSerial, Operator, kBlasMode >::RankKkernel; using Arguments = typename RankKkernel::Arguments; private: /// Kernel parameters object typename RankKkernel::Params params_; public: /// Constructs the SYRK. RankK() { } /// Determines whether the SYRK can execute the given problem. static Status can_implement(Arguments const &args) { if (!kSplitKSerial && args.batch_count > 1) { return Status::kErrorInvalidProblem; } Status status = RankKkernel::can_implement(args); if (FillModeC != FillMode::kLower && FillModeC != FillMode::kUpper) { return Status::kErrorInvalidProblem; } if (status != Status::kSuccess) { return status; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.batch_count); if (kSplitKSerial && args.batch_count > 1) { bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n()); } return bytes; } /// Initializes SYRK state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.batch_count); if (kSplitKSerial) { if (args.batch_count > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } size_t bytes = get_workspace_size(args); cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream); if (result != cudaSuccess) { return Status::kErrorInternal; } } } else { if (args.batch_count > 1) { return Status::kErrorInvalidProblem; } } int gemm_k_size = args.problem_size.k(); // Initialize the Params structure params_ = typename RankKkernel::Params{ args, grid_tiled_shape, gemm_k_size, static_cast<int *>(workspace) }; int smem_size = int(sizeof(typename RankKkernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(Kernel<RankKkernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { if (kSplitKSerial && args.batch_count > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } } size_t workspace_bytes = get_workspace_size(args); if (workspace_bytes && !workspace) { return Status::kErrorWorkspaceNull; } params_.update(args, workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. 
Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(RankKkernel::kThreadCount, 1, 1); int smem_size = int(sizeof(typename RankKkernel::SharedStorage)); cutlass::Kernel<RankKkernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace); if (status == Status::kSuccess) { status = run(stream); } return status; } }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for column-major output exchange operand. template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for C and D matrix operands typename ElementC_, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator_, /// Operator class tag typename OperatorClass_, /// Tag indicating architecture to tune for. This is the minimum SM that /// supports the intended feature. The device kernel can be built /// targeting any SM larger than this number. typename ArchTag_, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_, /// Warp-level tile size (concept: GemmShape) typename WarpShape_, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_, /// Epilogue output operator typename EpilogueOutputOp_, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_, /// Number of stages used in the pipelined mainloop int Stages, /// Access granularity of A matrix in units of elements int AlignmentA, /// If true, kernel supports split-K with serial reduction bool SplitKSerial, /// Operation performed by RankK update kernel typename Operator_, /// Complex elementwise transformation ComplexTransform TransformA, /// Blas3 computation mode (symmetric/hermitian) BlasMode BlasMode_ > class RankK<ElementA_, LayoutA_, ElementC_, layout::ColumnMajor, // partially specialized on LayoutC FillModeC, ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, AlignmentA, SplitKSerial, Operator_, TransformA, BlasMode_> { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using ElementC = ElementC_; using LayoutC = layout::ColumnMajor; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static FillMode const kFillModeC = FillModeC; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentC = EpilogueOutputOp::kCount; static bool const kSplitKSerial = SplitKSerial; static BlasMode const kBlasMode = BlasMode_; static int const kUpdateRank = 1; // Complex transform for input A matrices (function on input layout) static ComplexTransform const 
kTransformA = TransformA; /// Define the kernel using UnderlyingOperator = typename cutlass::gemm::device::RankK< ElementA, LayoutA, ElementC, layout::RowMajor, InvertFillMode<FillModeC>::mode, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, kAlignmentA, kSplitKSerial, Operator, kTransformA, kBlasMode >; /// Argument structure using Arguments = typename UnderlyingOperator::Arguments; using RankKkernel = typename UnderlyingOperator::RankKkernel; private: UnderlyingOperator underlying_operator_; public: /// Constructs the RankK. RankK() { } /// Helper to construct a transposed equivalent for the underying RankK operator static Arguments to_underlying_arguments(Arguments const &args) { return args; } /// Determines whether the RankK can execute the given problem. static Status can_implement(Arguments const &args) { return UnderlyingOperator::can_implement(to_underlying_arguments(args)); } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args)); } /// Computes the grid shape static dim3 get_grid_shape(Arguments const &args) { return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args)); } /// Computes the maximum number of active blocks per multiprocessor static int maximum_active_blocks(int smem_capacity = -1) { return UnderlyingOperator::maximum_active_blocks(smem_capacity); } /// Initializes RankK state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream); } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { return underlying_operator_.update(to_underlying_arguments(args), workspace); } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { return underlying_operator_.run(stream); } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace RankK } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
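For the serial split-K path above, the workspace is one int-sized semaphore per output tile of the swizzled grid. The standalone sketch below reproduces that sizing rule; it assumes the identity threadblock swizzle's ceiling-division tiling, uses made-up problem and tile extents, and treats batch_count the way the device class above does, as the number of split-K slices.

#include <cstddef>
#include <cstdio>

// Returns the bytes a RankK-style serial split-K reduction would need for its
// semaphores, assuming an identity swizzle that tiles the NxN output by
// ceiling division (a sketch, not the CUTLASS implementation).
static std::size_t rank_k_workspace_bytes(int problem_n, int tile_m, int tile_n,
                                          int split_k_slices, bool split_k_serial) {
  int tiled_m = (problem_n + tile_m - 1) / tile_m;
  int tiled_n = (problem_n + tile_n - 1) / tile_n;
  if (split_k_serial && split_k_slices > 1) {
    // One lock per output tile: sizeof(int) * tiled_shape.m() * tiled_shape.n().
    return sizeof(int) * static_cast<std::size_t>(tiled_m) * static_cast<std::size_t>(tiled_n);
  }
  return 0;  // no workspace needed without serial split-K reduction
}

int main() {
  std::printf("workspace bytes: %zu\n",
              rank_k_workspace_bytes(/*problem_n=*/4096, /*tile_m=*/128,
                                     /*tile_n=*/128, /*split_k_slices=*/4,
                                     /*split_k_serial=*/true));
  return 0;
}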
cutlass/include/cutlass/gemm/device/rank_k.h
{ "file_path": "cutlass/include/cutlass/gemm/device/rank_k.h", "repo_id": "cutlass", "token_count": 5606 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default sparse GEMM with visitor. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/wmma.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/gemm.h" #include "cutlass/gemm/kernel/default_gemm_sparse.h" #include "cutlass/gemm/kernel/sparse_gemm_with_visitor.h" #include "cutlass/gemm/kernel/gemm_pipelined.h" #include "cutlass/gemm/threadblock/default_mma_core_sm75.h" #include "cutlass/gemm/threadblock/default_mma_core_sm70.h" #include "cutlass/gemm/threadblock/default_mma_core_sm80.h" #include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h" #include "cutlass/gemm/threadblock/default_sparse_mma.h" #include "cutlass/gemm/threadblock/default_mma_core_simt.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" #endif //CUTLASS_ARCH_WMMA_ENABLED //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { //////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename FusionCallbacks, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// Operation performed by GEMM typename Operator, /// Number of stages used in the pipelined epilogue int EpilogueStages = 1> struct DefaultSparseGemmWithVisitor; //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Ampere Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Layout type 
for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename FusionCallbacks, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// Operation performed by GEMM typename Operator, /// Number of stages used in the pipelined epilogue int EpilogueStages> struct DefaultSparseGemmWithVisitor<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, FusionCallbacks, ThreadblockSwizzle, Stages, Operator, EpilogueStages> { /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultSparseMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static constexpr int kAlignmentC = 128 / sizeof_bits<ElementC>::value;; using ElementEpilogue = ElementAccumulator; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; using EpilogueOutputOp = typename epilogue::thread::LinearCombination< ElementC, kAlignmentC, ElementAccumulator, ElementEpilogue>; using BaseEpilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount>::Epilogue; // Define epilogue using Epilogue = cutlass::epilogue::threadblock::EpilogueWithVisitorCallbacks< BaseEpilogue, FusionCallbacks, EpilogueStages>; /// Define the kernel-level GEMM operator. using GemmKernel = kernel::SparseGemmWithEpilogueVisitor<Mma, Epilogue, ThreadblockSwizzle>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
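DefaultSparseGemmWithVisitor follows the usual CUTLASS "Default*" idiom: the primary template is only declared, and a partial specialization for the supported (OpClassTensorOp, Sm80) combination assembles the mainloop, the visitor epilogue, and the kernel as nested type aliases. The sketch below reduces that idiom to plain C++ with invented placeholder types (MyTensorOp, MySm80, FakeMma, and so on); it illustrates the pattern only and is not CUTLASS code.

#include <type_traits>

struct MyTensorOp {};   // stand-in for arch::OpClassTensorOp
struct MySm80 {};       // stand-in for arch::Sm80

template <class Element> struct FakeMma {};       // stand-in threadblock MMA
template <class Mma>     struct FakeEpilogue {};  // stand-in visitor epilogue
template <class Mma, class Epilogue> struct FakeKernel {};

// Primary template: declared only, so unsupported combinations fail to compile.
template <class Element, class OpClass, class Arch>
struct DefaultSparseKernel;

// Partial specialization: the one supported combination defines the aliases.
template <class Element>
struct DefaultSparseKernel<Element, MyTensorOp, MySm80> {
  using Mma      = FakeMma<Element>;
  using Epilogue = FakeEpilogue<Mma>;
  using Kernel   = FakeKernel<Mma, Epilogue>;
};

using Kernel = DefaultSparseKernel<float, MyTensorOp, MySm80>::Kernel;
static_assert(std::is_same_v<Kernel,
              FakeKernel<FakeMma<float>, FakeEpilogue<FakeMma<float>>>>);

int main() { return 0; }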
cutlass/include/cutlass/gemm/kernel/default_gemm_sparse_with_visitor.h
{ "file_path": "cutlass/include/cutlass/gemm/kernel/default_gemm_sparse_with_visitor.h", "repo_id": "cutlass", "token_count": 2603 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Base scheduler for grouped problems */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Enumerated type describing the type of scheduling to perform for the ProblemVisitor enum class GroupScheduleMode { // Perform all scheduling on device kDeviceOnly, // Precompute on the host the full sequence of problems to access kHostPrecompute }; /// Visitor class to abstract away the algorithm for iterating over tiles template <typename ProblemSizeHelper, typename ThreadblockShape_> struct BaseGroupedProblemVisitor { using ThreadblockShape = ThreadblockShape_; struct ProblemInfo { static int32_t const kNoPrefetchEntry = -1; int32_t problem_idx; int32_t problem_start; CUTLASS_DEVICE ProblemInfo() : problem_idx(kNoPrefetchEntry), problem_start(kNoPrefetchEntry) {} CUTLASS_DEVICE ProblemInfo(int32_t problem_idx_, int32_t problem_start_) : problem_idx(problem_idx_), problem_start(problem_start_) {} }; struct Params { cutlass::gemm::GemmCoord const *problem_sizes; int32_t problem_count; void const *workspace; int32_t tile_count; // // Methods // /// Ctor CUTLASS_HOST_DEVICE Params(): problem_sizes(nullptr), problem_count(0), workspace(nullptr), tile_count(0) { } /// Ctor CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmCoord const *problem_sizes, int32_t problem_count, void const *workspace = nullptr, int32_t tile_count = 0 ): problem_sizes(problem_sizes), problem_count(problem_count), workspace(workspace), tile_count(tile_count) {} }; Params params; int32_t tile_idx; int32_t problem_tile_start; int32_t problem_idx; // // Methods // CUTLASS_DEVICE BaseGroupedProblemVisitor( Params const &params_, int32_t block_idx ): params(params_), tile_idx(block_idx), problem_tile_start(0), problem_idx(0) {} /// Get the grid shape CUTLASS_HOST_DEVICE static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) { return ProblemSizeHelper::grid_shape(problem); } /// Gets the global tile index CUTLASS_HOST_DEVICE int32_t tile_index() const { return tile_idx; } /// Gets the index of the problem CUTLASS_HOST_DEVICE int32_t problem_index() const { return problem_idx; } CUTLASS_HOST_DEVICE int32_t threadblock_idx() const { return tile_idx - problem_tile_start; } CUTLASS_DEVICE void advance(int32_t grid_size) { tile_idx += grid_size; } CUTLASS_HOST_DEVICE static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) { ProblemSizeHelper::possibly_transpose_problem(problem); } /// Returns the problem size for the current problem CUTLASS_HOST_DEVICE cutlass::gemm::GemmCoord problem_size() const { GemmCoord problem = params.problem_sizes[problem_idx]; ProblemSizeHelper::possibly_transpose_problem(problem); return problem; } CUTLASS_HOST_DEVICE static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) { return ProblemSizeHelper::tile_count(grid); } static int32_t group_tile_count(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count) { int32_t total_tiles = 0; for (int32_t i = 0; i < problem_count; ++i) { auto problem = host_problem_sizes_ptr[i]; possibly_transpose_problem(problem); auto grid = grid_shape(problem); total_tiles += tile_count(grid); } return total_tiles; } }; 
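// --- Editor-added illustration (not CUTLASS code) ---------------------------
// group_tile_count() above walks the problem array, derives each problem's
// grid by ceiling-dividing by the threadblock tile, and sums the tile counts.
// The sketch below reproduces that arithmetic assuming the common case where
// a problem's tile count is grid.m() * grid.n(); the 128x128 tile and the
// three problem sizes are hypothetical example values.
namespace grouped_visitor_example {

struct ProblemMN { int m, n; };

inline int tiles_for(ProblemMN p, int tile_m, int tile_n) {
  int grid_m = (p.m + tile_m - 1) / tile_m;   // ceil(m / tile_m)
  int grid_n = (p.n + tile_n - 1) / tile_n;   // ceil(n / tile_n)
  return grid_m * grid_n;                     // one threadblock tile per (m, n) cell
}

inline int total_tiles_example() {
  ProblemMN problems[] = { {512, 512}, {384, 1024}, {96, 96} };
  int total = 0;
  for (ProblemMN p : problems) { total += tiles_for(p, 128, 128); }
  return total;                               // 16 + 24 + 1 = 41 tiles in the group
}

}  // namespace grouped_visitor_example
// -----------------------------------------------------------------------------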
///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ProblemSizeHelper, typename ThreadblockShape, GroupScheduleMode GroupScheduleMode_, int PrefetchTileCount, int ThreadCount > struct GroupedProblemVisitor; ///////////////////////////////////////////////////////////////////////////////////////////////// // ProblemVisitor that performs all scheduling on device // template <typename ProblemSizeHelper, typename ThreadblockShape, int PrefetchTileCount, int ThreadCount> struct GroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape, GroupScheduleMode::kDeviceOnly, PrefetchTileCount, ThreadCount>: public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> { using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>; using Params = typename Base::Params; static int const kThreadCount = ThreadCount; static bool const kRequiresPrecomputation = false; static int const kThreadsPerWarp = 32; struct SharedStorage {}; // Final tile of the problem loaded by this thread. Each thread will hold // a separate value. int32_t problem_ending_tile; SharedStorage &shared_storage; // // Methods // CUTLASS_DEVICE GroupedProblemVisitor( Params const &params_, SharedStorage &shared_storage_, int32_t block_idx ): Base(params_, block_idx), problem_ending_tile(0), shared_storage(shared_storage_) { this->problem_idx = -1 * kThreadsPerWarp; this->problem_tile_start = 0; } CUTLASS_DEVICE bool next_tile() { // Check whether the tile to compute is within the range of the current problem. int32_t problem_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, this->problem_idx % kThreadsPerWarp); if (this->tile_idx < problem_tile_end) { return true; } // Check whether the tile to compute is within the current group of problems fetched by the warp. // The last tile for this group is the final tile of the problem held by the final thread in the warp. int32_t group_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1); // Keep the starting problem for this group in `problem_idx`. This is done to reduce // register pressure. The starting problem for this group is simply the first problem // in the group most recently fetched by the warp. int32_t &group_problem_start = this->problem_idx; group_problem_start = (this->problem_idx / kThreadsPerWarp) * kThreadsPerWarp; // Keep the starting tile for this group in `problem_tile_start`. This is done to reduce // register pressure. int32_t &group_tile_start = this->problem_tile_start; // Each thread in the warp processes a separate problem to advance until // reaching a problem whose starting tile is less less than tile_idx. while (group_tile_end <= this->tile_idx) { group_problem_start += kThreadsPerWarp; if (group_problem_start > this->params.problem_count) { return false; } // Since `group_tile_start` is a reference to `this->problem_tile_start`, this // also sets `this->problem_tile_start`. The fact that `this->problem_tile_start` // is also set here is used later in `next_tile`. group_tile_start = group_tile_end; int lane_idx = threadIdx.x % kThreadsPerWarp; int32_t lane_problem = group_problem_start + lane_idx; // Compute the number of tiles in the problem assigned to each thread. 
problem_ending_tile = 0; if (lane_problem < this->params.problem_count) { cutlass::gemm::GemmCoord problem = this->params.problem_sizes[lane_problem]; this->possibly_transpose_problem(problem); cutlass::gemm::GemmCoord grid = this->grid_shape(problem); problem_ending_tile = this->tile_count(grid); } // Compute a warp-wide inclusive prefix sum to compute the ending tile index of // each thread's problem. CUTLASS_PRAGMA_UNROLL for (int i = 1; i < kThreadsPerWarp; i <<= 1) { int32_t val = __shfl_up_sync(0xffffffff, problem_ending_tile, i); if (lane_idx >= i) { problem_ending_tile += val; } } // The total tile count for this group is now in the final position of the prefix sum int32_t tiles_in_group = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1); problem_ending_tile += group_tile_start; group_tile_end += tiles_in_group; } // The next problem to process is the first one that does not have ending tile position // that is greater than or equal to tile index. int32_t problem_idx_in_group = __popc(__ballot_sync(0xffffffff, problem_ending_tile <= this->tile_idx)); this->problem_idx = group_problem_start + problem_idx_in_group; // The starting tile for this problem is the ending tile of the previous problem. In cases // where `problem_idx_in_group` is the first problem in the group, we do not need to reset // `problem_tile_start`, because it is set to the previous group's ending tile in the while // loop above. if (problem_idx_in_group > 0) { this->problem_tile_start = __shfl_sync(0xffffffff, problem_ending_tile, problem_idx_in_group - 1); } return true; } static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count, int32_t block_count) { return 0; } static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count, int32_t block_count, void* host_workspace_ptr) {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Precomputes schedule on host and prefetches into shared memory // template <typename ProblemSizeHelper, typename ThreadblockShape, int PrefetchTileCount, int ThreadCount> struct GroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape, GroupScheduleMode::kHostPrecompute, PrefetchTileCount, ThreadCount> : public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> { static_assert(PrefetchTileCount > 0, "GroupedProblemVisitor with GroupScheduleMode `kHostPrecompute` currently requires prefetching to shared memory"); using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>; using Params = typename Base::Params; using ProblemInfo = typename Base::ProblemInfo; static bool const kRequiresPrecomputation = true; static int const kPrefetchTileCount = PrefetchTileCount; static int const kThreadCount = ThreadCount; struct SharedStorage { // Sequence of problem IDs and starting tiles to compute cutlass::Array<ProblemInfo, kPrefetchTileCount> prefetched_problems; }; int32_t tiles_computed; int32_t iterations_per_block; int32_t block_load_start; SharedStorage &shared_storage; ProblemInfo const *problem_info_ptr; // // Methods // CUTLASS_DEVICE GroupedProblemVisitor( Params const &params_, SharedStorage &shared_storage_, int32_t block_idx ): Base(params_, block_idx), tiles_computed(0), shared_storage(shared_storage_), problem_info_ptr(reinterpret_cast<ProblemInfo const*>(params_.workspace)) { iterations_per_block = (params_.tile_count - 1 + gridDim.x) / gridDim.x; block_load_start = iterations_per_block * 
block_idx; // Start prefetching the first set of tiles to compute prefetch_tiles(); } CUTLASS_DEVICE bool next_tile() { if (this->tile_idx >= this->params.tile_count) { return false; } int32_t prefetch_idx = (tiles_computed % kPrefetchTileCount); if (prefetch_idx == 0) { // Ensure all previous stores to shared memory have been completed __syncthreads(); } auto problem_info = shared_storage.prefetched_problems[prefetch_idx]; ++tiles_computed; if ((tiles_computed % kPrefetchTileCount) == 0) { // Begin prefetching next set of tiles. Synchronize first to ensure that // we don't overwrite the current buffer while someone else is using it. __syncthreads(); prefetch_tiles(); } this->problem_idx = problem_info.problem_idx; this->problem_tile_start = problem_info.problem_start; return true; } static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count, int32_t block_count) { int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count); int32_t entries_per_block = ((total_tiles - 1 + block_count) / block_count); return sizeof(ProblemInfo) * entries_per_block * block_count; } #if !defined(__CUDACC_RTC__) static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count, int32_t block_count, void* host_workspace_ptr) { ProblemInfo* host_problem_info_ptr = reinterpret_cast<ProblemInfo*>(host_workspace_ptr); int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count); int32_t entries_per_block = (total_tiles - 1 + block_count) / block_count; int tile = 0; int start_tile = 0; for (int p_idx = 0; p_idx < problem_count; ++p_idx) { auto problem = host_problem_sizes_ptr[p_idx]; Base::possibly_transpose_problem(problem); auto grid = Base::grid_shape(problem); int tiles = Base::tile_count(grid); ProblemInfo problem_info(p_idx, start_tile); for (int i = 0; i < tiles; ++i, ++tile) { host_problem_info_ptr[(entries_per_block * (tile % block_count)) + (tile / block_count)] = problem_info; } start_tile += tiles; } } #endif private: CUTLASS_DEVICE void prefetch_tiles() { CUTLASS_PRAGMA_UNROLL for (int32_t i = 0; i < kPrefetchTileCount; i += kThreadCount) { int32_t offset = threadIdx.x + i; if (offset < kPrefetchTileCount && (tiles_computed + offset < iterations_per_block)) { shared_storage.prefetched_problems[offset] = problem_info_ptr[block_load_start + tiles_computed + offset]; } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
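The kHostPrecompute path above assigns tile t to block t % block_count and stores its ProblemInfo at slot entries_per_block * (t % block_count) + (t / block_count), so each block later reads a contiguous, prefetch-friendly run of its own entries. The standalone host sketch below reproduces that layout; the per-problem tile counts and the four-block grid are made-up example values.

#include <cstdio>
#include <vector>

struct ProblemInfo { int problem_idx; int problem_start; };

int main() {
  const int tiles_per_problem[] = {3, 5, 2};   // hypothetical tile counts per problem
  const int problem_count = 3;
  const int block_count   = 4;

  int total_tiles = 0;
  for (int p = 0; p < problem_count; ++p) { total_tiles += tiles_per_problem[p]; }
  const int entries_per_block = (total_tiles + block_count - 1) / block_count;

  std::vector<ProblemInfo> schedule(entries_per_block * block_count,
                                    ProblemInfo{-1, -1});

  // Same placement rule as host_precompute(): round-robin tiles over blocks.
  int tile = 0;
  int start_tile = 0;
  for (int p = 0; p < problem_count; ++p) {
    for (int i = 0; i < tiles_per_problem[p]; ++i, ++tile) {
      schedule[entries_per_block * (tile % block_count) + (tile / block_count)] =
          ProblemInfo{p, start_tile};
    }
    start_tile += tiles_per_problem[p];
  }

  // Each block then walks its own contiguous slice, as next_tile() does.
  for (int b = 0; b < block_count; ++b) {
    std::printf("block %d:", b);
    for (int i = 0; i < entries_per_block; ++i) {
      const ProblemInfo e = schedule[entries_per_block * b + i];
      std::printf(" (problem=%d, start_tile=%d)", e.problem_idx, e.problem_start);
    }
    std::printf("\n");
  }
  return 0;
}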
cutlass/include/cutlass/gemm/kernel/grouped_problem_visitor.h
{ "file_path": "cutlass/include/cutlass/gemm/kernel/grouped_problem_visitor.h", "repo_id": "cutlass", "token_count": 6168 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default template for a Blocked-Ell MMA. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/wmma.h" #include "cutlass/layout/matrix.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/default_mma_core_simt.h" #include "cutlass/gemm/threadblock/default_mma_core_sm70.h" #include "cutlass/gemm/threadblock/default_mma_core_sm75.h" #include "cutlass/gemm/threadblock/default_mma_core_sm80.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/gemm/threadblock/default_mma_core_wmma.h" #endif //CUTLASS_ARCH_WMMA_ENABLED #include "cutlass/gemm/threadblock/ell_mma_pipelined.h" #include "cutlass/gemm/threadblock/ell_mma_multistage.h" #include "cutlass/transform/threadblock/ell_predicated_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator_, /// Layout type for C and D matrix operands typename LayoutC_, /// Operator class tag typename OperatorClass_, /// Tag indicating architecture to tune for typename ArchTag_, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_, /// Warp-level tile size (concept: GemmShape) typename WarpShape_, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_, /// Number of stages used in the pipelined mainloop int Stages, /// Operation perfomed by GEMM typename Operator, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. 
bool AccumulatorsInRowMajor = false > struct DefaultEllMma; //////////////////////////////////////////////////////////////////////////////// /// Specialization for row-major output (OperatorClass Simt) template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Operation performed by GEMM typename Operator> struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2, Operator, false> { // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, Operator>; // Define iterators over tiles from the A operand using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; // Define iterators over tiles from the B operand using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; // Define the threadblock-scoped pipelined matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, layout::RowMajor, typename MmaCore::MmaPolicy>; }; //////////////////////////////////////////////////////////////////////////////// /// Specialization for row-major output (OperatorClass TensorOp) template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Operation performed by GEMM typename Operator > struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2, Operator, false> { // Define the MmaCore components using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, Operator>; // Define iterators over tiles from the A operand using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; // Define iterators over tiles from the B operand using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; // Define the threadblock-scoped pipelined matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, layout::RowMajor, typename MmaCore::MmaPolicy>; }; //////////////////////////////////////////////////////////////////////////////// /// Specialization for row-major output (OperatorClass TensorOp) template < /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Operation performed by GEMM typename Operator > struct DefaultEllMma<float, LayoutA, kAlignmentA, float, LayoutB, kAlignmentB, float, layout::RowMajor, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2, Operator, false> { // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, float, LayoutA, float, LayoutB, float, layout::RowMajor, arch::OpClassTensorOp, 2, arch::OpMultiplyAddFastF16>; // Define iterators over tiles from the A operand using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, float, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; // Define iterators over tiles from the B operand using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, float, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; // Define the threadblock-scoped pipelined matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, float, layout::RowMajor, typename MmaCore::MmaPolicy>; }; //////////////////////////////////////////////////////////////////////////////// /// Specialization for column-major-interleaved output template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B 
matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Operation performed by GEMM typename Operator, /// Number of Interleaved K int InterleavedK> struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2, Operator, true> { // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator, true>; static_assert(kAlignmentA == 128 / sizeof_bits<ElementA>::value, "Alignment must match thread data map's vector length"); static_assert(kAlignmentB == 128 / sizeof_bits<ElementB>::value, "Alignment must match thread data map's vector length"); // Define iterators over tiles from the A operand using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA>; // Define iterators over tiles from the B operand using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB>; // Define the threadblock-scoped pipelined matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>, typename MmaCore::MmaPolicy>; }; //////////////////////////////////////////////////////////////////////////////// /// Specialization for row-major output template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Number of stages used in the multistage mainloop int Stages, /// Operation performed by GEMM typename Operator > struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator, false> { // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA,
LayoutA, ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, Operator>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>; using IteratorA = cutlass::transform::threadblock::EllPredicatedTileAccessIterator< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>; using IteratorB = cutlass::transform::threadblock::EllPredicatedTileAccessIterator< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; // Define the threadblock-scoped multistage matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, typename MmaCore::MmaPolicy, Stages>; }; //////////////////////////////////////////////////////////////////////////////// /// Specialization for row-major output (OperatorClass TensorOp) template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Number of stages used in the multistage mainloop int Stages, /// Operation performed by GEMM typename Operator > struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator, false> { static cutlass::arch::CacheOperation::Kind const CacheOpA = ((sizeof_bits<ElementA>::value * kAlignmentA) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * kAlignmentB) == 128) ?
cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, Operator, false, CacheOpA, CacheOpB>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>; using IteratorA = cutlass::transform::threadblock::EllPredicatedTileAccessIterator< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>; using IteratorB = cutlass::transform::threadblock::EllPredicatedTileAccessIterator< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; // Define the threadblock-scoped multistage matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, typename MmaCore::MmaPolicy, Stages>; }; //////////////////////////////////////////////////////////////////////////////// /// Specialization for column-major-interleaved output template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Number of stages used in the multistage mainloop int Stages, /// Operation performed by GEMM typename Operator, /// Number of Interleaved K int InterleavedK> struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator, true> { // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages, Operator, true>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>; using IteratorA = cutlass::transform::threadblock::EllPredicatedTileAccessIterator< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; // Define iterators over tiles from the B operand using ThreadMapB = typename
MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>; using IteratorB = cutlass::transform::threadblock::EllPredicatedTileAccessIterator< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; // Define the threadblock-scoped multistage matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, typename MmaCore::MmaPolicy, Stages>; }; //////////////////////////////////////////////////////////////////////////////// /// Specialization for SIMT IDP4A Kernels template < /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Operation performed by GEMM typename Operator, /// Warp-level tile size (concept: GemmShape) typename WarpShape> struct DefaultEllMma<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>, 2, Operator, false> { using InstructionShape = GemmShape<1, 1, 4>; using ElementA = int8_t; using ElementB = int8_t; using OperatorClass = arch::OpClassSimt; static const bool transposeA = cutlass::platform::is_same< LayoutA, layout::ColumnMajor >::value; static const bool transposeB = cutlass::platform::is_same< LayoutB, layout::RowMajor >::value; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, layout::RowMajor, OperatorClass, 2, Operator>; // Define iterators over tiles from the A operand using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA>; // Define iterators over tiles from the B operand using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB>; // Define the threadblock-scoped pipelined matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, layout::RowMajor, typename MmaCore::MmaPolicy>; }; //////////////////////////////////////////////////////////////////////////////// #if defined(CUTLASS_ARCH_WMMA_ENABLED) /// Specialization for Wmma TensorOp operator with 2 staged pipeline template < ///< Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B 
matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Layout type for C and D matrix operands typename LayoutC, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Operation performed by GEMM typename Operator> struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2, Operator, false> { // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassWmmaTensorOp, 2, Operator>; // Define iterators over tiles from the A operand using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; // Define iterators over tiles from the B operand using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; // Define the threadblock-scoped pipelined matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, LayoutC, typename MmaCore::MmaPolicy>; }; //////////////////////////////////////////////////////////////////////////////// /// Specialization for Wmma TensorOp operator with 1 staged pipeline template < ///< Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for internal accumulation typename ElementAccumulator, /// Layout type for C and D matrix operands typename LayoutC, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape, /// Operation performed by GEMM typename Operator> struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, 1, Operator, false> { // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassWmmaTensorOp, 1, Operator>; // Define iterators over tiles from the A operand using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, 
kAlignmentA>; // Define iterators over tiles from the B operand using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator< cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; // Define the threadblock-scoped singlestage matrix multiply using ThreadblockMma = cutlass::gemm::threadblock::MmaSingleStage< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, LayoutC, typename MmaCore::MmaPolicy>; }; //////////////////////////////////////////////////////////////////////////////// #endif //CUTLASS_ARCH_WMMA_ENABLED } // namespace threadblock } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
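// Editorial illustration (not part of default_ell_mma.h above): a minimal sketch of how the
// TensorOp multistage specialization defined in this file might be instantiated. The element
// types, tile shapes, alignments, and arch tag below are arbitrary example choices, and the
// template-parameter order is inferred from the partial specializations shown above.
#include "cutlass/arch/arch.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/half.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/threadblock/default_ell_mma.h"

using ExampleEllMmaDef = cutlass::gemm::threadblock::DefaultEllMma<
    cutlass::half_t, cutlass::layout::RowMajor, 8,        // A: element, layout, alignment
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,     // B: element, layout, alignment
    float, cutlass::layout::RowMajor,                     // accumulator element and C layout
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,  // operator class and arch tag
    cutlass::gemm::GemmShape<128, 128, 32>,               // threadblock tile
    cutlass::gemm::GemmShape<64, 64, 32>,                 // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,                  // tensor core instruction shape
    3,                                                    // three stages selects the EllMmaMultistage path
    cutlass::arch::OpMultiplyAdd,                         // math operation tag
    false>;                                               // output is not ColumnMajorInterleaved

// Kernel-level code composes with the nested types this trait computes:
using ExampleThreadblockMma = ExampleEllMmaDef::ThreadblockMma;  // EllMmaMultistage<...>
using ExampleIteratorA = ExampleEllMmaDef::IteratorA;            // EllPredicatedTileAccessIterator<...>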
cutlass/include/cutlass/gemm/threadblock/default_ell_mma.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/default_ell_mma.h", "repo_id": "cutlass", "token_count": 10635 }
32
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing warp-level per channel scale+bias+relu before matrix multiply-accumulate operations targeting Tensor Cores. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/arch/mma_sm75.h" #include "cutlass/arch/mma_sm80.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/warp/mma_tensor_op_policy.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename FragmentActivations, typename FragmentVarMean, typename FragmentGammaBeta> struct LayernormScaleBiasTransform { using T = typename FragmentActivations::Element; static int const NumActivations = FragmentActivations::kElements; static int const NumVarMean = FragmentVarMean::kElements; static int const NumGammaBeta = FragmentGammaBeta::kElements; static int const MmaElements = 2; // One element has one scale and one bias static int const MmaScaleBiasPair = 2; // 16816 has 2 columns and 2 rows static int const MmaCols = 2; static int const MmaRows = 2; using MmaOperand = Array<T, MmaElements>; using VarMeanOperand = Array<__half2, MmaScaleBiasPair>; using GammaBetaOperand = Array<T, MmaElements * MmaScaleBiasPair>; CUTLASS_DEVICE void transform(MmaOperand &activations, VarMeanOperand const &var_mean, GammaBetaOperand const &gamma_beta) { #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)) uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations); uint32_t const *ptr_var_mean = reinterpret_cast<uint32_t const *>(&var_mean); uint32_t const *ptr_gamma_beta = reinterpret_cast<uint32_t const *>(&gamma_beta); // Apply per channel scale+bias+relu if the data is not a special NaN // (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0. // We assume the pair of FP16 values are either both inbound or both out-of-bound. // It requires C to be an even number. asm volatile( "{\n\t" " fma.rn.f16x2 %0, %1, %2, %3;\n" " fma.rn.f16x2 %0, %4, %0, %5;\n" "}\n" : "=r"(ptr_activations[0]) : "r"(ptr_var_mean[0]), "r"(ptr_activations[0]), "r"(ptr_var_mean[1]), "r"(ptr_gamma_beta[0]), "r"(ptr_gamma_beta[1])); #else assert(0); #endif } CUTLASS_DEVICE void operator()(FragmentActivations &activations, FragmentVarMean const &var_mean, FragmentGammaBeta const &gamma_beta) { MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations); VarMeanOperand const *ptr_var_mean = reinterpret_cast<VarMeanOperand const *>(&var_mean); GammaBetaOperand const *ptr_gamma_beta = reinterpret_cast<GammaBetaOperand const *>(&gamma_beta); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < (NumActivations / MmaElements); ++i) { transform(ptr_activations[i], ptr_var_mean[i / (MmaCols * MmaRows) * MmaRows + i % MmaRows], ptr_gamma_beta[(i / MmaScaleBiasPair) % MmaCols]); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
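// Editorial illustration (not part of the header above): a scalar reference for the two fused
// f16x2 FMAs in transform(). The interpretation of the packed operands -- var_mean holding
// {inv_std, -mean * inv_std} and gamma_beta holding {gamma, beta} -- is an assumption inferred
// from the FMA ordering, not something this header states explicitly.
#include <cstdio>

// y = gamma * ((x - mean) * inv_std) + beta, expressed as the same two FMA steps.
inline float layernorm_scale_bias_reference(float x, float inv_std, float neg_mean_times_inv_std,
                                            float gamma, float beta) {
  float normalized = inv_std * x + neg_mean_times_inv_std;  // first  fma.rn.f16x2
  return gamma * normalized + beta;                         // second fma.rn.f16x2
}

int main() {
  float mean = 1.0f, inv_std = 0.5f, gamma = 0.5f, beta = 0.1f;
  float y = layernorm_scale_bias_reference(3.0f, inv_std, -mean * inv_std, gamma, beta);
  std::printf("%f\n", y);  // prints 0.600000
  return 0;
}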
cutlass/include/cutlass/gemm/warp/layernorm_scale_bias_transform.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/layernorm_scale_bias_transform.h", "repo_id": "cutlass", "token_count": 1939 }
33
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a class for using integer types smaller than one byte in host or device code. */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cstdint> #else #include <cstdint> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_size.h" #include "cutlass/platform/platform.h" namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// template <int Bits, bool Signed = true> struct integer_subbyte { /// Storage type using Storage = uint8_t; /// Number of bits static_assert(Bits <= 8*sizeof(Storage), "Require a subbyte of bits in integer_subbyte"); /// External type using xint_t = typename platform::conditional<Signed, int, unsigned>::type; /// Bitmask for truncation from larger integers static constexpr Storage bits_mask_ = Storage(Storage(-1) >> (8 - Bits)); /// Bitmask for the sign bit static constexpr Storage sign_mask_ = Storage((Signed ? 
1 : 0) << (Bits - 1)); // // Data members // Storage storage; // // Methods // /// No operation integer_subbyte() = default; /// Conversion from integer type CUTLASS_HOST_DEVICE explicit integer_subbyte(int value) : storage(reinterpret_cast<Storage const&>(value) & bits_mask_) {} CUTLASS_HOST_DEVICE explicit integer_subbyte(unsigned value) : storage(reinterpret_cast<Storage const&>(value) & bits_mask_) {} CUTLASS_HOST_DEVICE explicit integer_subbyte(double value) { xint_t tmp = static_cast<xint_t>(value); storage = reinterpret_cast<Storage const &>(tmp) & bits_mask_; } /// Convert to int or unsigned CUTLASS_HOST_DEVICE operator xint_t() const { if (sign_mask_ & storage) { // Sign extend return xint_t(storage) | ~xint_t(bits_mask_); } else { return xint_t(storage); } } /// Equality CUTLASS_HOST_DEVICE bool operator==(integer_subbyte const& rhs) const { return storage == rhs.storage; } /// Inequality CUTLASS_HOST_DEVICE bool operator!=(integer_subbyte const& rhs) const { return storage != rhs.storage; } /// Less than or equal CUTLASS_HOST_DEVICE bool operator<=(integer_subbyte const& rhs) const { if (sign_mask_ & storage) { return !(rhs.storage < storage); } else { return storage <= rhs.storage; } } /// Less than CUTLASS_HOST_DEVICE bool operator<(integer_subbyte const& rhs) const { if (sign_mask_ & storage) { return !(rhs.storage <= storage); } else { return storage < rhs.storage; } } /// Greater than or equal CUTLASS_HOST_DEVICE bool operator>=(integer_subbyte const& rhs) const { return !(*this < rhs); } /// Greater than CUTLASS_HOST_DEVICE bool operator>(integer_subbyte const& rhs) const { return !(*this <= rhs); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// 1-bit Unsigned integer type using uint1b_t = integer_subbyte<1, false>; /// 2-bit Integer type using int2b_t = integer_subbyte<2, true>; /// 2-bit Unsigned integer type using uint2b_t = integer_subbyte<2, false>; /// 4-bit Integer type using int4b_t = integer_subbyte<4, true>; /// 4-bit Unsigned integer type using uint4b_t = integer_subbyte<4, false>; /// 1-bit binary type using bin1_t = bool; /////////////////////////////////////////////////////////////////////////////////////////////////// template <int Bits, bool Signed> struct sizeof_bits<integer_subbyte<Bits,Signed>> { static constexpr int value = Bits; }; /// Defines the size of an element in bits - specialized for bin1_t template <> struct sizeof_bits<bin1_t> { static constexpr int value = 1; }; /////////////////////////////////////////////////////////////////////////////////////////////////// namespace platform { template <> struct numeric_limits<cutlass::int4b_t> { CUTLASS_HOST_DEVICE static cutlass::int4b_t const lowest() noexcept { return int4b_t{-8};} CUTLASS_HOST_DEVICE static cutlass::int4b_t const max() noexcept { return int4b_t{7};} CUTLASS_HOST_DEVICE static cutlass::int4b_t const min() noexcept { return lowest();} static constexpr bool is_integer = true; static constexpr bool is_signed = true; }; template <> struct numeric_limits<cutlass::uint4b_t> { CUTLASS_HOST_DEVICE static cutlass::uint4b_t const lowest() noexcept { return uint4b_t{0};} CUTLASS_HOST_DEVICE static cutlass::uint4b_t const max() noexcept { return uint4b_t{15};} CUTLASS_HOST_DEVICE static cutlass::uint4b_t const min() noexcept { return lowest();} static constexpr bool is_integer = true; static constexpr bool is_signed = false; }; template <> struct numeric_limits<cutlass::uint1b_t> { CUTLASS_HOST_DEVICE static cutlass::uint1b_t const 
lowest() noexcept { return uint1b_t{0};} CUTLASS_HOST_DEVICE static cutlass::uint1b_t const max() noexcept { return uint1b_t{1};} CUTLASS_HOST_DEVICE static cutlass::uint1b_t const min() noexcept { return lowest();} static constexpr bool is_integer = true; static constexpr bool is_signed = false; }; template <> struct numeric_limits<cutlass::int2b_t> { CUTLASS_HOST_DEVICE static cutlass::int2b_t lowest() noexcept { return int2b_t{-2}; } CUTLASS_HOST_DEVICE static cutlass::int2b_t min() noexcept { return lowest(); } CUTLASS_HOST_DEVICE static cutlass::int2b_t max() noexcept { return int2b_t{1}; } static constexpr bool is_integer = true; static constexpr bool is_signed = true; }; template <> struct numeric_limits<cutlass::uint2b_t> { CUTLASS_HOST_DEVICE static cutlass::uint2b_t const lowest() noexcept { return uint2b_t{0}; } CUTLASS_HOST_DEVICE static cutlass::uint2b_t const min() noexcept { return lowest(); } CUTLASS_HOST_DEVICE static cutlass::uint2b_t const max() noexcept { return uint2b_t{3}; } static constexpr bool is_integer = true; static constexpr bool is_signed = false; }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace platform } // namespace cutlass
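// Editorial illustration (not part of the header above): a small host-side sketch of how the
// sub-byte integer types defined in this file behave. It relies only on integer_subbyte.h and
// the numeric_limits specializations shown above; the printed values follow directly from the
// masking and sign-extension logic of integer_subbyte.
#include <cstdio>
#include "cutlass/integer_subbyte.h"

int main() {
  cutlass::int4b_t a(-3);   // low 4 bits stored; sign bit set
  cutlass::int4b_t b(9);    // 9 & 0xF = 0b1001, so it reads back as -7 in 4-bit two's complement
  cutlass::uint4b_t c(15);  // maximum value representable by uint4b_t

  std::printf("%d %d %u\n", int(a), int(b), unsigned(c));  // -3 -7 15

  std::printf("%d %d\n",
              int(cutlass::platform::numeric_limits<cutlass::int4b_t>::lowest()),  // -8
              int(cutlass::platform::numeric_limits<cutlass::int4b_t>::max()));    //  7
  return 0;
}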
cutlass/include/cutlass/integer_subbyte.h/0
{ "file_path": "cutlass/include/cutlass/integer_subbyte.h", "repo_id": "cutlass", "token_count": 2547 }
34
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Boost-like numeric conversion operator for CUTLASS numeric types */ #pragma once #if !defined(__CUDACC_RTC__) #include <cfenv> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/transform/thread/unary_op.h" #include "cutlass/array.h" #include "cutlass/half.h" namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Floating-point rounding style similare to Standard Library's formats but supporting /// additional rounding options. 
enum class FloatRoundStyle { round_indeterminate, ///< rounding mode unknown round_toward_zero, ///< round toward zero round_to_nearest, ///< round to nearest even round_to_nearest_satfinite, ///< round to nearest even, capping value to min and max of destination type round_toward_infinity, ///< round toward infinity round_toward_neg_infinity, ///< round toward negative infinity round_half_ulp_truncate, ///< add 0.5ulp to integer representation then round toward zero round_half_ulp_trunc_dntz ///< like round_half_ulp_truncate, except denorms are rounded *toward* zero }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename T, typename S, FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > struct NumericConverter { using result_type = T; using source_type = S; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { return static_cast<result_type>(s); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for float => int32_t // ///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(__CUDA_ARCH__) template <> struct NumericConverter<int32_t, float, FloatRoundStyle::round_to_nearest> { using result_type = int32_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; CUTLASS_DEVICE static result_type convert(source_type const & s) { return __float2int_rn(s); } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<int32_t, float, FloatRoundStyle::round_toward_zero> { using result_type = int32_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero; CUTLASS_DEVICE static result_type convert(source_type const & s) { return __float2int_rz(s); } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; #elif !defined(__CUDACC_RTC__) template <> struct NumericConverter<int32_t, float, FloatRoundStyle::round_to_nearest> { using result_type = int32_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; static result_type convert(source_type const & s) { std::fesetround(FE_TONEAREST); return (result_type)std::nearbyint(s); } result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<int32_t, float, FloatRoundStyle::round_toward_zero> { using result_type = int32_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero; static result_type convert(source_type const & s) { std::fesetround(FE_TOWARDZERO); return (result_type)std::nearbyint(s); } result_type operator()(source_type const &s) const { return convert(s); } }; #endif ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for float => int8_t // ///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(__CUDA_ARCH__) template <> struct NumericConverter<int8_t, float, FloatRoundStyle::round_to_nearest> { using result_type = int8_t; using source_type = float; static FloatRoundStyle const round_style = 
FloatRoundStyle::round_to_nearest; CUTLASS_DEVICE static result_type convert(source_type const & s) { int32_t intermediate; asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(intermediate) : "f"(s)); return static_cast<result_type>(intermediate); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<int8_t, float, FloatRoundStyle::round_toward_zero> { using result_type = int8_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero; CUTLASS_DEVICE static result_type convert(source_type const & s) { int32_t intermediate; asm volatile("cvt.rzi.sat.s8.f32 %0, %1;" : "=r"(intermediate) : "f"(s)); return static_cast<result_type>(intermediate); } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; #elif !defined(__CUDACC_RTC__) template <> struct NumericConverter<int8_t, float, FloatRoundStyle::round_to_nearest> { using result_type = int8_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; static result_type convert(source_type const & s) { std::fesetround(FE_TONEAREST); int32_t intermediate = (int32_t)std::nearbyint(s); // Low-end saturation intermediate = std::max(intermediate, (int32_t)std::numeric_limits<int8_t>::lowest()); // High-end saturation intermediate = std::min(intermediate, (int32_t)std::numeric_limits<int8_t>::max()); return static_cast<result_type>(intermediate); } result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<int8_t, float, FloatRoundStyle::round_toward_zero> { using result_type = int8_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero; static result_type convert(source_type const & s) { std::fesetround(FE_TOWARDZERO); int32_t intermediate = (int32_t)std::nearbyint(s); // Low-end saturation intermediate = std::max(intermediate, (int32_t)std::numeric_limits<int8_t>::lowest()); // High-end saturation intermediate = std::min(intermediate, (int32_t)std::numeric_limits<int8_t>::max()); return static_cast<result_type>(intermediate); } result_type operator()(source_type const &s) const { return convert(s); } }; #endif ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for float <= cutlass::half_t template <typename T, FloatRoundStyle Round> struct NumericConverter<T, T, Round> { using result_type = T; using source_type = T; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { return s; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for float <=> cutlass::half_t // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for float <= cutlass::half_t template <FloatRoundStyle Round> struct NumericConverter<float, cutlass::half_t, Round> { using result_type = float; using source_type = cutlass::half_t; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { result_type result = static_cast<float>(s); return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return 
convert(s); } }; /// Specialization for round-to-nearest template <> struct NumericConverter<cutlass::half_t, float, FloatRoundStyle::round_to_nearest> { using result_type = cutlass::half_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { result_type result = static_cast<cutlass::half_t>(s); return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Specialization for round-toward-zero template <> struct NumericConverter<cutlass::half_t, float, FloatRoundStyle::round_toward_zero> { using result_type = cutlass::half_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero; /// Round toward zero CUTLASS_HOST_DEVICE static result_type convert(source_type const & flt) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) return cutlass::half_t(__float2half_rz(flt)); #else // software implementation rounds toward nearest even unsigned const& s = reinterpret_cast<unsigned const &>(flt); uint16_t sign = uint16_t((s >> 16) & 0x8000); int32_t exp = int32_t((s >> 23) & 0xff) - 127; int mantissa = s & 0x7fffff; uint16_t u = 0; if ((s & 0x7fffffff) == 0) { // sign-preserving zero return cutlass::half_t::bitcast(sign); } if (exp > 15) { if (exp == 128 && mantissa) { // not a number u = 0x7fff; } else { // overflow to infinity u = sign | 0x7c00; } return cutlass::half_t::bitcast(u); } if (exp >= -14) { // normal fp32 to normal fp16 u = uint16_t((uint32_t(exp + 15) & 0x1f) << 10); u = uint16_t(u | (mantissa >> 13)); } else { // normal single-precision to subnormal cutlass::half_t-precision representation int rshift = (-14 - exp); if (rshift < 32) { mantissa |= (1 << 23); mantissa = (mantissa >> rshift); u = (uint16_t(mantissa >> 13) & 0x3ff); } else { mantissa = 0; u = 0; } } u |= sign; return cutlass::half_t::bitcast(u); #endif // defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for float <=> cutlass::bfloat16_t // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for float <= cutlass::bfloat16_t template <FloatRoundStyle Round> struct NumericConverter<float, cutlass::bfloat16_t, Round> { using result_type = float; using source_type = cutlass::bfloat16_t; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { return static_cast<float>(s); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_to_nearest> { using result_type = cutlass::bfloat16_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { return static_cast<cutlass::bfloat16_t>(s); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_half_ulp_truncate> { using result_type = cutlass::bfloat16_t; using source_type = float; static FloatRoundStyle const round_style = 
FloatRoundStyle::round_half_ulp_truncate; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { uint32_t x32 = reinterpret_cast<uint32_t const &>(s); #if defined(__CUDA_ARCH__) if (::isfinite(s)) { x32 += 0x8000; } #else if (std::isfinite(s)) { x32 += 0x8000; } #endif uint16_t x16 = uint16_t((x32 >> 16) & 0xffff); return cutlass::bfloat16_t::bitcast(x16); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_toward_zero> { using result_type = cutlass::bfloat16_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { uint32_t x32 = reinterpret_cast<uint32_t const &>(s); uint16_t x16 = uint16_t(x32 >> 16); return cutlass::bfloat16_t::bitcast(x16); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for float <=> cutlass::tfloat32_t // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for float <= cutlass::tfloat32_t template <FloatRoundStyle Round> struct NumericConverter<float, cutlass::tfloat32_t, Round> { using result_type = float; using source_type = cutlass::tfloat32_t; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { return static_cast<float>(s); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_to_nearest> { using result_type = cutlass::tfloat32_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { unsigned storage = reinterpret_cast<unsigned const &>(s); #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900 asm volatile("cvt.rn.tf32.f32 %0, %1;" : "=r"(storage) : "r"(storage)); #else if ((storage & 0x7f800000) != 0x7f800000) { bool mantissa_bit = ((storage & (1 << 13)) != 0); bool round_bit = ((storage & (1 << 12)) != 0); bool sticky_bit = ((storage & ((1 << 12) - 1)) != 0); if ((round_bit && sticky_bit) || (round_bit && mantissa_bit)) { storage += uint32_t(1 << 13); } // Note, the following is intentionally commented out. TF32 // does not define the low order bits, so they may be left in // an undefined state. // // By not truncating these bit explicitly, we avoid an extra logical // operation. // // TF32 may be implicitly converted to float by performing this // operation as needed. 
// // storage = (storage & ~0x1fff); } else if (storage & ~0xff800000) { storage = 0x7fffffff; } #endif return cutlass::tfloat32_t::bitcast(storage); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_half_ulp_truncate> { using result_type = cutlass::tfloat32_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_truncate; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { return cutlass::tfloat32_t::round_half_ulp_truncate(s); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// This rounding operation is similar to half_ulp_truncate except it rounds denorms toward zero. /// It avoids predicated code, though it requires a temporary register. template <> struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_half_ulp_trunc_dntz> { using result_type = cutlass::tfloat32_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_trunc_dntz; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { unsigned y = reinterpret_cast<unsigned const &>(s); y = y & 0xff800000; float d = reinterpret_cast<float const &>(y); float z = d / float(1 << 11) + s; return reinterpret_cast<result_type const &>(z); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template <> struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_toward_zero> { using result_type = cutlass::tfloat32_t; using source_type = float; static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { uint32_t x = reinterpret_cast<uint32_t const &>(s); return cutlass::tfloat32_t::bitcast(x & 0xffffe000); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Conversion operator for float to cutlass::tfloat32_t big and small values // ///////////////////////////////////////////////////////////////////////////////////////////////// template < FloatRoundStyle RoundBig = FloatRoundStyle::round_toward_zero, FloatRoundStyle RoundSmall = FloatRoundStyle::round_half_ulp_truncate > struct NumericConverterFastF32 { // result_type holds big cutlass::tfloat32_t at idx(0) and small cutlass::tfloat32_t at idx(1) using result_type = Array<cutlass::tfloat32_t, 2>; // source data type using source_type = float; // rounding styles for big and small part static FloatRoundStyle const kRoundBig = RoundBig; static FloatRoundStyle const kRoundSmall = RoundSmall; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { result_type result; NumericConverter<cutlass::tfloat32_t, float, kRoundBig> convert_big_; NumericConverter<cutlass::tfloat32_t, float, kRoundSmall> convert_small_; // convert and fill cutlass::tfloat32_t big at idx 0 result[0] = convert_big_(source); // convert and fill cutlass::tfloat32_t small at idx 1 result[1] = convert_small_(source - static_cast<float>(result[0])); return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Conversion and 
Clamp operator for Integers // ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename T, typename S > struct NumericConverterClamp { using result_type = T; using source_type = S; CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { NumericConverter<result_type, source_type> convert_op; result_type const kClamp_max = platform::numeric_limits<result_type>::max(); result_type const kClamp_min = platform::numeric_limits<result_type>::lowest(); if (s < (source_type)kClamp_min) return kClamp_min; if (s > (source_type)kClamp_max) return kClamp_max; return convert_op(s); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; // This converter is needed to enable cutlass::half_t output types when using int32_t accumulators. // Since floating-point types do not require a clamp, this converter simply casts from // the source type to cutlass::half_t. template < typename S > struct NumericConverterClamp<cutlass::half_t, S> { using result_type = cutlass::half_t; using source_type = S; CUTLASS_HOST_DEVICE static result_type convert(source_type const &source) { return static_cast<cutlass::half_t>(source); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Conversion operator for Array // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Conversion operator for Array template < typename T, typename S, int N, FloatRoundStyle Round = FloatRoundStyle::round_to_nearest, typename Transform = cutlass::transform::thread::UnaryTransform::Identity > struct NumericArrayConverter { using result_type = Array<T, N>; using source_type = Array<S, N>; static FloatRoundStyle const round_style = Round; static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value || platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value, "Unary Operator not supported."); CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { result_type result; NumericConverter<T, S, Round> convert_; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) { result[i] = convert_(s[i]); } else { // conjugate result[i] = conj(convert_(s[i])); } } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; template < typename T, int N, FloatRoundStyle Round, typename Transform > struct NumericArrayConverter<T, T, N, Round, Transform> { using result_type = Array<T, N>; using source_type = Array<T, N>; static FloatRoundStyle const round_style = Round; static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value || platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value, "Unary Operator not supported."); CUTLASS_HOST_DEVICE static result_type convert(source_type const &source) { if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) { return source; } else { result_type result; for (int i = 0; i < N; ++i) { result[i] = conj(source[i]); } return result; } } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; 
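/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Editorial note (not part of the original header): the converters above are plain function
// objects, so typical host-side usage looks like the hedged sketch below. The values in the
// comments assume the host-side specializations defined earlier in this file.
//
//   cutlass::NumericConverter<int8_t, float> to_s8;          // round_to_nearest by default
//   int8_t q = to_s8(2.5f);                                  // 2 (round half to even), saturated to int8 range
//
//   cutlass::NumericConverterClamp<int8_t, float> clamp_s8;
//   int8_t r = clamp_s8(300.0f);                             // clamped to 127
//
//   cutlass::NumericArrayConverter<cutlass::half_t, float, 4> to_half4;
//   cutlass::Array<float, 4> src;                            // ... fill with data ...
//   cutlass::Array<cutlass::half_t, 4> dst = to_half4(src);  // element-wise (or packed on GPU)
//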
///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<half, 2> <= Array<float, 2>, round to nearest template <> struct NumericArrayConverter<cutlass::half_t, float, 2, FloatRoundStyle::round_to_nearest> { using result_type = Array<cutlass::half_t, 2>; using source_type = Array<float, 2>; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) Array<cutlass::half_t, 2> result; reinterpret_cast<__half2 &>(result) = __float22half2_rn(reinterpret_cast<float2 const &>(source)); return result; #else NumericConverter<cutlass::half_t, float, round_style> convert_; // NOTE: cutlass::Array<half, N> is NOT an aggregate type and // below `{}` does NOT conduct zero initialization. Below `{}` will // conduct default initialization (calling default ctr). We use this syntax // to resolve compiler warning on uninitialized member variable. Array<cutlass::half_t, 2> result{}; result[0] = convert_(source[0]); result[1] = convert_(source[1]); return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float, 2> <= Array<cutlass::half_t, 2>, round to nearest template <FloatRoundStyle Round> struct NumericArrayConverter<float, cutlass::half_t, 2, Round> { using result_type = Array<float, 2>; using source_type = Array<cutlass::half_t, 2>; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) float2 result2 = __half22float2(reinterpret_cast<__half2 const &>(source)); return { float{result2.x}, float{result2.y} }; #else NumericConverter<float, cutlass::half_t, round_style> convert_; return { convert_(source[0]), convert_(source[1]) }; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<half> <= Array<float> template < int N, FloatRoundStyle Round > struct NumericArrayConverter<cutlass::half_t, float, N, Round> { using result_type = Array<cutlass::half_t, N>; using source_type = Array<float, N>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { NumericArrayConverter<cutlass::half_t, float, 2, Round> convert_vector_; NumericConverter<cutlass::half_t, float, Round> convert_element_; result_type result; Array<cutlass::half_t, 2> *result_ptr = reinterpret_cast<Array<cutlass::half_t, 2> *>(&result); Array<float, 2> const *source_ptr = reinterpret_cast<Array<float, 2> const *>(&source); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = convert_vector_(source_ptr[i]); } if (N % 2) { result[N - 1] = convert_element_(source[N - 1]); } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<half> <= Array<float> template < int N, FloatRoundStyle Round > struct NumericArrayConverter<float, cutlass::half_t, N, Round> { using result_type = Array<float, N>; using source_type = Array<cutlass::half_t, N>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static 
result_type convert(source_type const & source) { NumericArrayConverter<float, cutlass::half_t, 2, Round> convert_vector_; NumericConverter<float, cutlass::half_t, Round> convert_element_; result_type result; Array<float, 2> *result_ptr = reinterpret_cast<Array<float, 2> *>(&result); Array<cutlass::half_t, 2> const *source_ptr = reinterpret_cast<Array<cutlass::half_t, 2> const *>(&source); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = convert_vector_(source_ptr[i]); } if (N % 2) { result[N - 1] = convert_element_(source[N - 1]); } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<cutlass::bfloat16_t, 2> <= Array<float, 2>, round to nearest template <> struct NumericArrayConverter<cutlass::bfloat16_t, float, 2, FloatRoundStyle::round_to_nearest> { using result_type = Array<cutlass::bfloat16_t, 2>; using source_type = Array<float, 2>; static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { unsigned d; asm("cvt.rn.bf16x2.f32 %0, %1, %2;\n" : "=r"(d) : "f"(source[1]), "f"(source[0]) ); return reinterpret_cast<result_type const &>(d); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<cutlass::bfloat16_t> <= Array<float> template < int N, FloatRoundStyle Round > struct NumericArrayConverter<cutlass::bfloat16_t, float, N, Round> { using result_type = Array<cutlass::bfloat16_t, N>; using source_type = Array<float, N>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { NumericArrayConverter<cutlass::bfloat16_t, float, 2, Round> convert_vector_; NumericConverter<cutlass::bfloat16_t, float, Round> convert_element_; result_type result; Array<cutlass::bfloat16_t, 2> *result_ptr = reinterpret_cast<Array<cutlass::bfloat16_t, 2> *>(&result); Array<float, 2> const *source_ptr = reinterpret_cast<Array<float, 2> const *>(&source); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = convert_vector_(source_ptr[i]); } if (N % 2) { result[N - 1] = convert_element_(source[N - 1]); } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; #endif // if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) ///////////////////////////////////////////////////////////////////////////////////////////////// // Conditional guards to enable partial specialization for packed integers #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && \ ((__CUDACC_VER_MAJOR__ > 10) || \ ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2))) /// Partial specialization for Array<int8_t, 1> <= Array<int, 1> template < FloatRoundStyle Round > struct NumericArrayConverter<int8_t, int, 1, Round> { using result_type = Array<int8_t, 1>; using source_type = Array<int, 1>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { NumericConverter<int8_t, int, Round> convert_element_; result_type result; result[0] = convert_element_(source[0]); return result; } 
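  // Note (added for clarity): this N == 1 specialization simply forwards to the scalar
  // NumericConverter, while the N == 2 and N == 4 specializations below pack saturated
  // results with cvt.pack.sat.s8.s32.
  //
  // Illustrative usage sketch (hypothetical values, not part of the library):
  //
  //   cutlass::NumericArrayConverter<int8_t, int, 4> converter;
  //   cutlass::Array<int, 4> src;
  //   src[0] = 300; src[1] = -7; src[2] = -500; src[3] = 42;
  //   cutlass::Array<int8_t, 4> dst = converter(src);   // {127, -7, -128, 42} after saturation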
CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<int8_t, 2> <= Array<int, 2> template < FloatRoundStyle Round > struct NumericArrayConverter<int8_t, int, 2, Round> { using result_type = Array<int8_t, 2>; using source_type = Array<int, 2>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { uint32_t tmp; asm volatile( "cvt.pack.sat.s8.s32.b32 %0, %2, %1, 0;\n" : "=r"(tmp) : "r"(source[0]), "r"(source[1])); uint16_t out = (tmp & 0xffff); return reinterpret_cast<result_type const &>(out); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<int8_t, 4> <= Array<int, 4> template < FloatRoundStyle Round > struct NumericArrayConverter<int8_t, int, 4, Round> { using result_type = Array<int8_t, 4>; using source_type = Array<int, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { unsigned out; asm volatile( "{ .reg .u32 r4;" "cvt.pack.sat.s8.s32.b32 r4, %4, %3, 0;" "cvt.pack.sat.s8.s32.b32 %0, %2, %1, r4;" "}" : "=r"(out) : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3])); return reinterpret_cast<result_type const &>(out); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<int8_t> <= Array<int> template < int N, FloatRoundStyle Round > struct NumericArrayConverter<int8_t, int, N, Round> { static_assert(!(N % 4), "N must be multiple of 4."); using result_type = Array<int8_t, N>; using source_type = Array<int, N>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { NumericArrayConverter<int8_t, int, 4, Round> convert_vector_; result_type result; Array<int8_t, 4> *result_ptr = reinterpret_cast<Array<int8_t, 4> *>(&result); Array<int, 4> const *source_ptr = reinterpret_cast<Array<int, 4> const *>(&source); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 4; ++i) { result_ptr[i] = convert_vector_(source_ptr[i]); } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<uint8_t, 1> <= Array<int, 1> template < FloatRoundStyle Round > struct NumericArrayConverter<uint8_t, int, 1, Round> { using result_type = Array<uint8_t, 1>; using source_type = Array<int, 1>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { NumericConverter<uint8_t, int, Round> convert_element_; result_type result; result[0] = convert_element_(source[0]); return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<uint8_t, 2> <= Array<int, 2> template < FloatRoundStyle Round > struct NumericArrayConverter<uint8_t, int, 2, Round> { using result_type = Array<uint8_t, 2>; using source_type = Array<int, 2>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { uint32_t tmp; asm volatile( "cvt.pack.sat.u8.s32.b32 %0, %2, %1, 0;\n" : "=r"(tmp) : "r"(source[0]), "r"(source[1])); uint16_t out = (tmp & 0xffff); return reinterpret_cast<result_type const &>(out); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) 
const { return convert(s); } }; /// Partial specialization for Array<uint8_t, 4> <= Array<int, 4> template < FloatRoundStyle Round > struct NumericArrayConverter<uint8_t, int, 4, Round> { using result_type = Array<uint8_t, 4>; using source_type = Array<int, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { unsigned out; asm volatile( "{ .reg .u32 r4;" "cvt.pack.sat.u8.s32.b32 r4, %4, %3, 0;" "cvt.pack.sat.u8.s32.b32 %0, %2, %1, r4;" "}" : "=r"(out) : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3])); return reinterpret_cast<result_type const &>(out); } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<int8_t> <= Array<int> template < int N, FloatRoundStyle Round > struct NumericArrayConverter<uint8_t, int, N, Round> { static_assert(!(N % 4), "N must be multiple of 4."); using result_type = Array<uint8_t, N>; using source_type = Array<int, N>; static FloatRoundStyle const round_style = Round; CUTLASS_HOST_DEVICE static result_type convert(source_type const & source) { NumericArrayConverter<uint8_t, int, 4, Round> convert_vector_; result_type result; Array<uint8_t, 4> *result_ptr = reinterpret_cast<Array<uint8_t, 4> *>(&result); Array<int, 4> const *source_ptr = reinterpret_cast<Array<int, 4> const *>(&source); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 4; ++i) { result_ptr[i] = convert_vector_(source_ptr[i]); } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; #endif ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for Array<float, N> <=> Array<float_e4m3_t, N> // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<float, 2> <= Array<float_e4m3_t, 2> template < FloatRoundStyle Round > struct NumericArrayConverter<float, cutlass::float_e4m3_t, 2, Round> { using result_element = float; using source_element = cutlass::float_e4m3_t; using result_type = Array<result_element, 2>; using source_type = Array<source_element, 2>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out_fp16; uint16_t const& src_packed = reinterpret_cast<uint16_t const&>(source); asm volatile( \ "{\n" \ "cvt.rn.f16x2.e4m3x2 %0, %1;\n" \ "}\n" : "=r"(out_fp16): "h"(src_packed)); float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16)); result_type out; out[0] = res0.x; out[1] = res0.y; return out; #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float_e4m3_t, 2> <= Array<float, 2> template < FloatRoundStyle Round > struct NumericArrayConverter<float_e4m3_t, float, 2, Round> { using result_element = cutlass::float_e4m3_t; using source_element = float; using result_type = Array<result_element, 2>; using source_type = Array<source_element, 2>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t out; 
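    // Descriptive note (added for clarity): cvt.rn.satfinite.e4m3x2.f32 converts two fp32 inputs
    // to two e4m3 bytes in one instruction, rounding to nearest and saturating out-of-range values
    // to the largest finite e4m3 magnitude. The operand order below ("%2, %1") pairs source[1] with
    // the upper byte and source[0] with the lower byte, matching the little-endian Array<> layout.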
asm volatile( \ "{\n" \ "cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;\n" \ "}" \ : "=h"(out) : "f"(source[0]), "f"(source[1])); return reinterpret_cast<result_type const &>(out); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float, 2> <= Array<float_e5m2_t, 2> template < FloatRoundStyle Round > struct NumericArrayConverter<float, cutlass::float_e5m2_t, 2, Round> { using result_element = float; using source_element = cutlass::float_e5m2_t; using result_type = Array<result_element, 2>; using source_type = Array<source_element, 2>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out_fp16; uint16_t const& src_packed = reinterpret_cast<uint16_t const&>(source); asm volatile( \ "{\n" \ "cvt.rn.f16x2.e5m2x2 %0, %1;\n" \ "}\n" : "=r"(out_fp16): "h"(src_packed)); float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16)); result_type out; out[0] = res0.x; out[1] = res0.y; return out; #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; namespace detail { /// Special converters that can be used with 4 8-bit elements packed in a register. /// Common use is for fast FP8 converters. template < typename T, typename S, FloatRoundStyle Round = FloatRoundStyle::round_to_nearest, typename Transform = cutlass::transform::thread::UnaryTransform::Identity > struct NumericArrayConverterPacked4Element { using result_type = Array<T, 4>; using source_type = Array<S, 4>; static FloatRoundStyle const round_style = Round; static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value || platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value, "Unary Operator not supported."); CUTLASS_HOST_DEVICE static result_type convert(source_type const & s) { result_type result; NumericConverter<T, S, Round> convert_; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) { result[i] = convert_(s[i]); } else { // conjugate result[i] = conj(convert_(s[i])); } } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float, 4> <= Array<float_e4m3_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float, cutlass::float_e4m3_t, Round> { using result_element = float; using source_element = cutlass::float_e4m3_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out_fp16[2]; uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source); asm volatile( \ "{\n" \ ".reg .b16 lo, hi;\n" \ "mov.b32 {lo, hi}, %2;\n" \ "cvt.rn.f16x2.e4m3x2 %0, lo;\n" \ "cvt.rn.f16x2.e4m3x2 %1, hi;\n" 
\ "}\n" : "=r"(out_fp16[0]), "=r"(out_fp16[1]) : "r"(src_packed)); float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[0])); float2 res1 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[1])); result_type out; out[0] = res0.x; out[1] = res0.y; out[2] = res1.x; out[3] = res1.y; return out; #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float_e4m3_t, 4> <= Array<float, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float_e4m3_t, float, Round> { using result_element = cutlass::float_e4m3_t; using source_element = float; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out; asm volatile( \ "{\n" \ ".reg .b16 lo;\n" \ ".reg .b16 hi;\n" \ "cvt.rn.satfinite.e4m3x2.f32 lo, %2, %1;\n" \ "cvt.rn.satfinite.e4m3x2.f32 hi, %4, %3;\n" \ "mov.b32 %0, {lo, hi};\n" \ "}" \ : "=r"(out) : "f"(source[0]), "f"(source[1]), "f"(source[2]), "f"(source[3])); return reinterpret_cast<result_type const &>(out); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for Array<float, 4> <=> Array<float_e5m2_t, 4> // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<float, 4> <= Array<float_e5m2_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float, cutlass::float_e5m2_t, Round> { using result_element = float; using source_element = cutlass::float_e5m2_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out_fp16[2]; uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source); asm volatile( \ "{\n" \ ".reg .b16 lo, hi;\n" \ "mov.b32 {lo, hi}, %2;\n" \ "cvt.rn.f16x2.e5m2x2 %0, lo;\n" \ "cvt.rn.f16x2.e5m2x2 %1, hi;\n" \ "}\n" : "=r"(out_fp16[0]), "=r"(out_fp16[1]) : "r"(src_packed)); float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[0])); float2 res1 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[1])); result_type out; out[0] = res0.x; out[1] = res0.y; out[2] = res1.x; out[3] = res1.y; return out; #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float_e5m2_t, 4> <= Array<float, 4> template < FloatRoundStyle Round > struct 
NumericArrayConverterPacked4Element<float_e5m2_t, float, Round> { using result_element = cutlass::float_e5m2_t; using source_element = float; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out; asm volatile( \ "{\n" \ ".reg .b16 lo;\n" \ ".reg .b16 hi;\n" \ "cvt.rn.satfinite.e5m2x2.f32 lo, %2, %1;\n" \ "cvt.rn.satfinite.e5m2x2.f32 hi, %4, %3;\n" \ "mov.b32 %0, {lo, hi};\n" \ "}" \ : "=r"(out) : "f"(source[0]), "f"(source[1]), "f"(source[2]), "f"(source[3])); return reinterpret_cast<result_type const &>(out); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for Array<cutlass::half_t, 4> <=> Array<float_e4m3_t, 4> // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<cutlass::half_t, 4> <= Array<float_e4m3_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<cutlass::half_t, cutlass::float_e4m3_t, Round> { using result_element = cutlass::half_t; using source_element = cutlass::float_e4m3_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out[2]; uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source); asm volatile( \ "{\n" \ ".reg .b16 lo, hi;\n" \ "mov.b32 {lo, hi}, %2;\n" \ "cvt.rn.f16x2.e4m3x2 %0, lo;\n" \ "cvt.rn.f16x2.e4m3x2 %1, hi;\n" \ "}\n" : "=r"(out[0]), "=r"(out[1]) : "r"(src_packed)); return reinterpret_cast<result_type const &>(out); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float_e4m3_t, 4> <= Array<cutlass::half_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::half_t, Round> { using result_element = cutlass::float_e4m3_t; using source_element = cutlass::half_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out; uint32_t const* src_packed = reinterpret_cast<uint32_t const*>(&source); asm volatile( \ "{\n" \ ".reg .b16 lo;\n" \ ".reg .b16 hi;\n" \ "cvt.rn.satfinite.e4m3x2.f16x2 lo, %1;\n" \ "cvt.rn.satfinite.e4m3x2.f16x2 hi, %2;\n" \ "mov.b32 %0, {lo, hi};\n" \ "}" \ : "=r"(out) : "r"(src_packed[0]), "r"(src_packed[1])); return reinterpret_cast<result_type const &>(out); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; 
i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for Array<cutlass::half_t, 4> <=> Array<float_e5m2_t, 4> // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<cutlass::half_t, 4> <= Array<float_e5m2_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<cutlass::half_t, cutlass::float_e5m2_t, Round> { using result_element = cutlass::half_t; using source_element = cutlass::float_e5m2_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out[2]; uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source); asm volatile( \ "{\n" \ ".reg .b16 lo, hi;\n" \ "mov.b32 {lo, hi}, %2;\n" \ "cvt.rn.f16x2.e5m2x2 %0, lo;\n" \ "cvt.rn.f16x2.e5m2x2 %1, hi;\n" \ "}\n" : "=r"(out[0]), "=r"(out[1]) : "r"(src_packed)); return reinterpret_cast<result_type const &>(out); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float_e5m2_t, 4> <= Array<cutlass::half_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::half_t, Round> { using result_element = cutlass::float_e5m2_t; using source_element = cutlass::half_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint32_t out; uint32_t const* src_packed = reinterpret_cast<uint32_t const*>(&source); asm volatile( \ "{\n" \ ".reg .b16 lo;\n" \ ".reg .b16 hi;\n" \ "cvt.rn.satfinite.e5m2x2.f16x2 lo, %1;\n" \ "cvt.rn.satfinite.e5m2x2.f16x2 hi, %2;\n" \ "mov.b32 %0, {lo, hi};\n" \ "}" \ : "=r"(out) : "r"(src_packed[0]), "r"(src_packed[1])); return reinterpret_cast<result_type const &>(out); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for Array<cutlass::bfloat16_t, 4> <=> Array<float_e4m3_t, 4> // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<cutlass::bfloat16_t, 4> <= Array<float_e4m3_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<cutlass::bfloat16_t, cutlass::float_e4m3_t, Round> { using result_element = cutlass::bfloat16_t; using source_element = cutlass::float_e4m3_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 
4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) // Convert f8 to float NumericArrayConverterPacked4Element<float, source_element, Round> src2float; Array<float, 4> tmp_floats = src2float(source); // Convert float to bf16 result_type out; Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp_floats); Array<result_element, 2>* packed_out = reinterpret_cast<Array<result_element, 2>*>(&out); NumericArrayConverter<result_element, float, 2, Round> float2result; packed_out[0] = float2result(packed_tmp[0]); packed_out[1] = float2result(packed_tmp[1]); return out; #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float_e4m3_t, 4> <= Array<cutlass::bfloat16_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::bfloat16_t, Round> { using result_element = cutlass::float_e4m3_t; using source_element = cutlass::bfloat16_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) // Convert bf16 to float Array<float, 4> tmp; Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp); Array<source_element, 2> const* packed_source = reinterpret_cast<Array<source_element, 2> const*>(&source); NumericArrayConverter<float, source_element, 2, Round> src2float; packed_tmp[0] = src2float(packed_source[0]); packed_tmp[1] = src2float(packed_source[1]); // Convert float to f8 NumericArrayConverterPacked4Element<result_element, float, Round> float2result; return float2result(tmp); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for Array<cutlass::bfloat16_t, 4> <=> Array<float_e5m2_t, 4> // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<cutlass::bfloat16_t, 4> <= Array<float_e5m2_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<cutlass::bfloat16_t, cutlass::float_e5m2_t, Round> { using result_element = cutlass::bfloat16_t; using source_element = cutlass::float_e5m2_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) // Convert f8 to float NumericArrayConverterPacked4Element<float, source_element, Round> src2float; Array<float, 4> tmp_floats = src2float(source); // Convert float to bf16 result_type out; Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp_floats); Array<result_element, 2>* packed_out = 
reinterpret_cast<Array<result_element, 2>*>(&out); NumericArrayConverter<result_element, float, 2, Round> float2result; packed_out[0] = float2result(packed_tmp[0]); packed_out[1] = float2result(packed_tmp[1]); return out; #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float_e5m2_t, 4> <= Array<cutlass::bfloat16_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::bfloat16_t, Round> { using result_element = cutlass::float_e5m2_t; using source_element = cutlass::bfloat16_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) // Convert bf16 to float Array<float, 4> tmp; Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp); Array<source_element, 2> const* packed_source = reinterpret_cast<Array<source_element, 2> const*>(&source); NumericArrayConverter<float, source_element, 2, Round> src2float; packed_tmp[0] = src2float(packed_source[0]); packed_tmp[1] = src2float(packed_source[1]); // Convert float to f8 NumericArrayConverterPacked4Element<result_element, float, Round> float2result; return float2result(tmp); #else result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; #endif } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for Array<float_e4m3_t, 4> <=> Array<float_e5m2_t, 4> // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<float_e4m3_t, 4> <= Array<float_e5m2_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::float_e5m2_t, Round> { using result_element = cutlass::float_e4m3_t; using source_element = cutlass::float_e5m2_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { result_type result; NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float_e5m2_t, 4> <= Array<float_e4m3_t, 4> template < FloatRoundStyle Round > struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::float_e4m3_t, Round> { using result_element = cutlass::float_e5m2_t; using source_element = cutlass::float_e4m3_t; using result_type = Array<result_element, 4>; using source_type = Array<source_element, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const & source) { result_type result; NumericConverter<result_element, source_element, 
Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { result[i] = converter(source[i]); } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for: // Array<T, N> <=> Array<float_e4m3_t, N> // Array<T, N> <=> Array<float_e5m2_t, N> // using packed converter under the hood // ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename T, typename S, int N, FloatRoundStyle Round > struct PackedNumericArrayConverter { using result_element = T; using source_element = S; using result_type = Array<result_element, N>; using source_type = Array<source_element, N>; static FloatRoundStyle const round_style = Round; private: using packed_result_type = Array<result_element, 4>; using packed_source_type = Array<source_element, 4>; public: CUTLASS_DEVICE static result_type convert(source_type const & source) { result_type result; packed_result_type* packed_result = reinterpret_cast<packed_result_type*>(&result); const packed_source_type* packed_source = reinterpret_cast<const packed_source_type*>(&source); detail::NumericArrayConverterPacked4Element<result_element, source_element, Round> packed_converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 4; ++i) { packed_result[i] = packed_converter(packed_source[i]); } // Handle leftovers NumericConverter<result_element, source_element, Round> converter; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N % 4; ++i) { int idx = ((N / 4) * 4) + i; result[idx] = converter(source[idx]); } return result; } CUTLASS_HOST_DEVICE result_type operator()(source_type const &s) const{ return convert(s); } }; /// Partial specialization for Array<T, N> <= Array<float_e4m3_t, N> template < typename T, int N, FloatRoundStyle Round > struct NumericArrayConverter<T, cutlass::float_e4m3_t, N, Round> : public PackedNumericArrayConverter<T, cutlass::float_e4m3_t, N, Round> {}; /// Partial specialization for Array<T, N> <= Array<float_e5m2_t, N> template < typename T, int N, FloatRoundStyle Round > struct NumericArrayConverter<T, cutlass::float_e5m2_t, N, Round> : public PackedNumericArrayConverter<T, cutlass::float_e5m2_t, N, Round> {}; /// Partial specialization for Array<float_e4m3_t, N> <= Array<S, N> template < typename S, int N, FloatRoundStyle Round > struct NumericArrayConverter<float_e4m3_t, S, N, Round> : public PackedNumericArrayConverter<float_e4m3_t, S, N, Round> {}; /// Partial specialization for Array<float_e5m2_t, N> <= Array<S, N> template < typename S, int N, FloatRoundStyle Round > struct NumericArrayConverter<float_e5m2_t, S, N, Round> : public PackedNumericArrayConverter<float_e5m2_t, S, N, Round> {}; /// Partial specialization for Array<float_e4m3_t, N> <= Array<float_e5m2_t, N> template < int N, FloatRoundStyle Round > struct NumericArrayConverter<float_e4m3_t, cutlass::float_e5m2_t, N, Round> : public PackedNumericArrayConverter<float_e4m3_t, cutlass::float_e5m2_t, N, Round> {}; /// Partial specialization for Array<float_e5m2_t, N> <= Array<float_e4m3_t, N> template < int N, FloatRoundStyle Round > struct NumericArrayConverter<float_e5m2_t, cutlass::float_e4m3_t, N, Round> : public PackedNumericArrayConverter<float_e5m2_t, cutlass::float_e4m3_t, N, Round> {}; /// Partial specialization for Array<float_e4m3_t, N> <= Array<float_e4m3_t, N> template < int N, FloatRoundStyle Round > struct 
NumericArrayConverter<float_e4m3_t, cutlass::float_e4m3_t, N, Round> :
  public PackedNumericArrayConverter<float_e4m3_t, cutlass::float_e4m3_t, N, Round> {};

/// Partial specialization for Array<float_e5m2_t, N> <= Array<float_e5m2_t, N>
template <
  int N,
  FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, cutlass::float_e5m2_t, N, Round> :
  public PackedNumericArrayConverter<float_e5m2_t, cutlass::float_e5m2_t, N, Round> {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Partial specialization for Array<int8_t> <= Array<float>
/// Conversion is performed with saturation regardless of setting of
/// the `Round` template parameter.
template <
  FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, float, 1, Round> {

  using result_type = Array<int8_t, 1>;
  using source_type = Array<float, 1>;
  static FloatRoundStyle const round_style = Round;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {
    // Convert float to int8_t directly (the scalar converter saturates)
    NumericConverter<int8_t, float, Round> destination_converter;
    result_type result;
    result[0] = destination_converter(source[0]);
    return result;
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

// To convert an FP32 to an integer type narrower than 32 bits, we need to convert it to int32 first.
template <
  typename T,
  int N,
  FloatRoundStyle Round
>
struct NumericArrayFP32ToIntConverter {

  using result_type = Array<T, N>;
  using source_type = Array<float, N>;
  static FloatRoundStyle const round_style = Round;

  static_assert(platform::numeric_limits<T>::is_integer, "the dest type has to be int.");

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {
    // Convert float to int
    Array<int32_t, N> temporary;
    NumericArrayConverter<int32_t, float, N, Round> compute_converter;
    temporary = compute_converter(source);

    // Convert int32 to the destination integer type
    NumericArrayConverter<T, int32_t, N, Round> destination_converter;
    return destination_converter(temporary);
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

template <
  int N,
  FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, float, N, Round> {

  using result_type = Array<int8_t, N>;
  using source_type = Array<float, N>;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {
    NumericArrayFP32ToIntConverter<int8_t, N, Round> converter;
    return converter(source);
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

template <
  int N,
  FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, float, N, Round> {

  using result_type = Array<uint8_t, N>;
  using source_type = Array<float, N>;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {
    NumericArrayFP32ToIntConverter<uint8_t, N, Round> converter;
    return converter(source);
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

template <
  int N,
  FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, float, N, Round> {

  using result_type = Array<int4b_t, N>;
  using source_type = Array<float, N>;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {
    NumericArrayFP32ToIntConverter<int4b_t, N, Round> converter;
    return converter(source);
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

template <
  int N,
  FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, float, N, Round> {

  using result_type = Array<uint4b_t, N>;
  using source_type = Array<float, N>;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {
    NumericArrayFP32ToIntConverter<uint4b_t, N, Round> converter;
    return converter(source);
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) && \
    ((__CUDACC_VER_MAJOR__ > 10) || \
    ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))

/// Partial specialization for Array<int4b_t, 8> <= Array<int, 8>
template <
  FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, int, 8, Round> {

  using result_type = Array<int4b_t, 8>;
  using source_type = Array<int, 8>;
  static FloatRoundStyle const round_style = Round;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {

    unsigned out;

    asm volatile(
      "{ .reg .u32 r4;"
      "cvt.pack.sat.s4.s32.b32 r4, %8, %7, 0;"
      "cvt.pack.sat.s4.s32.b32 r4, %6, %5, r4;"
      "cvt.pack.sat.s4.s32.b32 r4, %4, %3, r4;"
      "cvt.pack.sat.s4.s32.b32 %0, %2, %1, r4;"
      "}"
      : "=r"(out)
      : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]),
        "r"(source[4]), "r"(source[5]), "r"(source[6]), "r"(source[7]));

    return reinterpret_cast<result_type const &>(out);
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

/// Partial specialization for Array<int4b_t> <= Array<int>
template <
  int N,
  FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, int, N, Round> {
  static_assert(!(N % 8), "N must be multiple of 8.");

  using result_type = Array<int4b_t, N>;
  using source_type = Array<int, N>;
  static FloatRoundStyle const round_style = Round;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {

    NumericArrayConverter<int4b_t, int, 8, Round> convert_vector_;

    result_type result;

    Array<int4b_t, 8> *result_ptr = reinterpret_cast<Array<int4b_t, 8> *>(&result);
    Array<int, 8> const *source_ptr = reinterpret_cast<Array<int, 8> const *>(&source);

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N / 8; ++i) {
      result_ptr[i] = convert_vector_(source_ptr[i]);
    }

    return result;
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

/// Partial specialization for Array<uint4b_t, 8> <= Array<int, 8>
template <
  FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, int, 8, Round> {

  using result_type = Array<uint4b_t, 8>;
  using source_type = Array<int, 8>;
  static FloatRoundStyle const round_style = Round;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {

    unsigned out;

    asm volatile(
      "{ .reg .u32 r4;"
      "cvt.pack.sat.u4.s32.b32 r4, %8, %7, 0;"
      "cvt.pack.sat.u4.s32.b32 r4, %6, %5, r4;"
      "cvt.pack.sat.u4.s32.b32 r4, %4, %3, r4;"
      "cvt.pack.sat.u4.s32.b32 %0, %2, %1, r4;"
      "}"
      : "=r"(out)
      : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]),
        "r"(source[4]), "r"(source[5]), "r"(source[6]), "r"(source[7]));

    return reinterpret_cast<result_type const &>(out);
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

/// Partial specialization for Array<uint4b_t> <= Array<int>
template <
  int N,
  FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, int, N, Round> {
  static_assert(!(N % 8), "N must be multiple of 8.");

  using result_type = Array<uint4b_t, N>;
  using source_type = Array<int, N>;
  static FloatRoundStyle const round_style = Round;

  CUTLASS_HOST_DEVICE
  static result_type convert(source_type const & source) {

    NumericArrayConverter<uint4b_t, int, 8, Round> convert_vector_;

    result_type result;

    Array<uint4b_t, 8> *result_ptr = reinterpret_cast<Array<uint4b_t, 8> *>(&result);
    Array<int, 8> const *source_ptr = reinterpret_cast<Array<int, 8> const *>(&source);

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N / 8; ++i) {
      result_ptr[i] = convert_vector_(source_ptr[i]);
    }

    return result;
  }

  CUTLASS_HOST_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

#endif  // Conditional guards to enable partial specialization for packed integers

namespace detail {

  /* A helper class that can vectorize a numeric converter with implementation for several vector widths.

     The vector widths must be given in decreasing order of width, and each must be a power of 2.
     The vector converters must produce identical results to the scalar converters for consistency.
  */
  class VectorizedConverter {
  private:
    // Base case to handle remainder elements as scalars.
    template <int Offset, size_t ParentWidth, typename ArrayConverter>
    CUTLASS_DEVICE
    static void convert_helper(
      typename ArrayConverter::result_type& result,
      typename ArrayConverter::source_type const& source) {

      using ElementRes = typename ArrayConverter::result_type::Element;
      using ElementSrc = typename ArrayConverter::source_type::Element;
      // If no more converters, handle the remaining elements as scalars.
      constexpr int total_elements = ArrayConverter::result_type::kElements;
      constexpr int remainder = total_elements - Offset;
      static_assert(remainder == (total_elements % ParentWidth), "Unexpected remainder.");

      typename ArrayConverter::ScalarConverter scalar_converter;
      CUTLASS_PRAGMA_UNROLL
      for (int i = Offset; i < ArrayConverter::result_type::kElements; ++i) {
        result[i] = scalar_converter(ElementSrc(source[i]));
      }
    }

    template <int Offset, size_t ParentWidth, typename ArrayConverter,
              typename ResultVectorArray, typename SourceVectorArray,
              typename... OtherVectorArrays>
    CUTLASS_DEVICE
    static void convert_helper(typename ArrayConverter::result_type& result, typename ArrayConverter::source_type const& source) {
      static_assert(sizeof...(OtherVectorArrays) % 2 == 0, "Vector converters must come in {dst, src} pairs");
      static_assert(ResultVectorArray::kElements == SourceVectorArray::kElements, "Vector converters must have the same vector width");
      static_assert(cutlass::platform::is_same<typename ArrayConverter::result_type::Element, typename ResultVectorArray::Element>::value,
        "ResultVectorArray must have the same element type as ArrayConverter::result_type");
      static_assert(cutlass::platform::is_same<typename ArrayConverter::source_type::Element, typename SourceVectorArray::Element>::value,
        "SourceVectorArray must have the same element type as ArrayConverter::source_type");
      static_assert(Offset >= 0 && Offset <= ArrayConverter::result_type::kElements, "Offset must be between 0 and N");

      static_assert(ParentWidth == 0 || ParentWidth > ResultVectorArray::kElements, "Vector arrays must be given in decreasing order of width");

      constexpr int vector_width = ResultVectorArray::kElements;
      static_assert(ispow2(vector_width), "Vector width must be a power of 2");

      using ElementRes = typename ArrayConverter::result_type::Element;
      using ElementSrc = typename ArrayConverter::source_type::Element;

      constexpr int vector_bits_res = vector_width * cutlass::sizeof_bits<ElementRes>::value;
      constexpr int vector_bits_src = vector_width * cutlass::sizeof_bits<ElementSrc>::value;

      static_assert(vector_bits_res % 8 == 0, "Result vector type must be byte addressed.");
      static_assert(vector_bits_src % 8 == 0, "Source vector type must be byte addressed.");

      constexpr int vector_offset = Offset / vector_width;
      ResultVectorArray* packed_result_vec = reinterpret_cast<ResultVectorArray*>(&result) + vector_offset;
      SourceVectorArray const* packed_source_vec = reinterpret_cast<SourceVectorArray const*>(&source) + vector_offset;

      // Convert the remaining elements as vectors.
      constexpr int total_elements = ArrayConverter::result_type::kElements;
      constexpr int groups_of_vec = (total_elements - Offset) / vector_width;
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < groups_of_vec; ++i) {
        packed_result_vec[i] = ArrayConverter::template packed_convert<ResultVectorArray, SourceVectorArray>(packed_source_vec[i]);
      }

      constexpr int new_offset = Offset + vector_width * groups_of_vec;
      // Recurse to handle other vector converters, or the scalar base case.
      convert_helper<new_offset, ResultVectorArray::kElements, ArrayConverter, OtherVectorArrays...>(result, source);
    }

  public:
    /* A method to convert vectors of elements using the packed_convert method of the converter.

       Converters using this class must implement packed convert and support 1 or more vector conversions.
    */
    template <typename ArrayConverter, typename ResultVectorArray, typename SourceVectorArray,
              typename... OtherVectorArrays>
    CUTLASS_DEVICE
    static void convert(typename ArrayConverter::result_type& result, typename ArrayConverter::source_type const& source) {
      convert_helper<0, 0, ArrayConverter, ResultVectorArray, SourceVectorArray, OtherVectorArrays...>(result, source);
    }
  };

}

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Partial specialization for Array<cutlass::float_e4m3_t, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::float_e4m3_t, cutlass::int4b_t, N, Round> {
  using result_type = Array<cutlass::float_e4m3_t, N>;
  using source_type = Array<cutlass::int4b_t, N>;

  static FloatRoundStyle const round_style = Round;

private:
  using result_type_packed_8 = Array<cutlass::float_e4m3_t, 8>;
  using result_type_packed_4 = Array<cutlass::float_e4m3_t, 4>;
  using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
  using source_type_packed_4 = Array<cutlass::int4b_t, 4>;

  using ScalarConverter = NumericConverter<cutlass::float_e4m3_t, cutlass::int4b_t, Round>;

  CUTLASS_DEVICE
  static uint32_t to_reg(source_type_packed_4 const& source) {
    return static_cast<uint32_t>(
      reinterpret_cast<const uint16_t&>(source));
  }

  CUTLASS_DEVICE
  static uint32_t to_reg(source_type_packed_8 const& source) {
    return reinterpret_cast<const uint32_t&>(source);
  }

  // The core converter uses a lookup table to convert i4 -> e4m3.
  template <typename PackedResultType, typename PackedSrcType>
  CUTLASS_DEVICE
  static PackedResultType packed_convert(PackedSrcType const &source) {

    static_assert((platform::is_same<PackedSrcType, source_type_packed_4>::value &&
                   platform::is_same<PackedResultType, result_type_packed_4>::value) ||
                  (platform::is_same<PackedSrcType, source_type_packed_8>::value &&
                   platform::is_same<PackedResultType, result_type_packed_8>::value),
                  "Invalid PackedSrcType/PackedResultType must be 4 or 8 to use private convert dispatch.");

    // Hold FP8 outputs in reg. We need 1 reg for every 4 outputs.
    cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 4, sizeof(PackedResultType)> r;

    // View the input as reg
    uint32_t reg = to_reg(source);

    // Determines whether to select from the negative or positive candidates
    uint32_t sign = (reg & 0x88888888) >> 1;

    // Ignore sign bit when indexing into LUT
    uint32_t lut_idx = (reg & 0x77777777);

    // The sign bits are OR'd into the base selector 0x32103210 so that the final prmt
    // picks from the negative candidates for negative inputs
    const uint32_t final_prmt_base = 0x32103210;

    // [-8, -7, -6, -5] encoded as FP8
    static constexpr uint32_t NEG_E4M3s_REG1 = 0xCACCCED0;
    // [-4, -3, -2, -1] encoded as FP8
    static constexpr uint32_t NEG_E4M3s_REG2 = 0xB8C0C4C8;
    // [0, 1, 2, 3] encoded as FP8
    static constexpr uint32_t POS_E4M3s_REG1 = 0x44403800;
    // [4, 5, 6, 7] encoded as FP8
    static constexpr uint32_t POS_E4M3s_REG2 = 0x4E4C4A48;

    const int iters = PackedSrcType::kElements / 4;
    #pragma unroll
    for (int ii = 0; ii < iters; ++ii, lut_idx >>=16, sign >>=16) {
      uint32_t final_prmt_idx = final_prmt_base | sign;

      // This uses a look up table to convert packed int4s to packed fp8s, using the int4 value
      // as the index to prmt.
      // It first selects both the positive and negative candidates, then uses the sign bit to
      // select the correct candidate.
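      // Worked example (added for illustration), for the lowest source nibble = 0xB (int4 value -5):
      //   lut_idx nibble = 0xB & 0x7 = 3, so the first two prmt instructions below place byte 3 of
      //   POS_E4M3s_REG1 (0x44 = +3.0) into pos_f8s byte 0 and byte 3 of NEG_E4M3s_REG1 (0xCA = -5.0)
      //   into neg_f8s byte 0.
      //   sign nibble = (0x8 >> 1) = 0x4, so the final selector nibble is 0x0 | 0x4 = 0x4, which makes
      //   the third prmt pick byte 0 of neg_f8s: the output byte is 0xCA, i.e. -5.0 in e4m3.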
      asm volatile(
          "{\n"
          " .reg .b32 pos_f8s, neg_f8s;\n"
          " prmt.b32 pos_f8s, %1, %2, %5;\n"
          " prmt.b32 neg_f8s, %3, %4, %5;\n"
          " prmt.b32 %0, pos_f8s, neg_f8s, %6;\n"
          "}\n"
          : "=r"(r[ii])
          : "n"(POS_E4M3s_REG1), "n"(POS_E4M3s_REG2), "n"(NEG_E4M3s_REG1), "n"(NEG_E4M3s_REG2),
            "r"(lut_idx), "r"(final_prmt_idx));
    }

    return reinterpret_cast<PackedResultType&>(r);
  }

  friend class detail::VectorizedConverter;

public:
  CUTLASS_DEVICE
  static result_type convert(source_type const &source) {
    result_type result;
    using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
    detail::VectorizedConverter::convert<ConverterType,
                                         result_type_packed_8, source_type_packed_8,
                                         result_type_packed_4, source_type_packed_4>(result, source);

    return result;
  }

  CUTLASS_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

/// Partial specialization for Array<float, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<float, cutlass::int4b_t, N, Round> {
  using result_type = Array<float, N>;
  using source_type = Array<cutlass::int4b_t, N>;

  static FloatRoundStyle const round_style = Round;

private:
  using result_type_packed_8 = Array<float, 8>;
  using result_type_packed_4 = Array<float, 4>;
  using result_type_packed_2 = Array<float, 2>;
  using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
  using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
  using source_type_packed_2 = Array<cutlass::int4b_t, 2>;

  using ScalarConverter = NumericConverter<float, cutlass::int4b_t, Round>;

  CUTLASS_DEVICE
  static uint32_t to_reg(source_type_packed_2 const& source) {
    return static_cast<uint32_t>(
      reinterpret_cast<const uint8_t&>(source));
  }

  CUTLASS_DEVICE
  static uint32_t to_reg(source_type_packed_4 const& source) {
    return static_cast<uint32_t>(
      reinterpret_cast<const uint16_t&>(source));
  }

  CUTLASS_DEVICE
  static uint32_t to_reg(source_type_packed_8 const& source) {
    return reinterpret_cast<const uint32_t&>(source);
  }

  template <int offset, int elements_to_convert, typename PackedResultType>
  CUTLASS_DEVICE
  static void packed_convert_vec(PackedResultType& result, uint32_t src_reg) {
    static_assert(offset == 0 || offset == 4, "Invalid offset");

    // Selects one of the bottom int4s and constructs:
    // 8388608 + (x + 8)
    // 8388608 + 16 * (x + 8)
    // 8388608 + 256 * (x + 8)
    // 8388608 + 4096 * (x + 8)
    uint32_t const and_masks[4] = {0x0000000F, 0x000000F0, 0x00000F00, 0x0000F000};
    uint32_t const xor_masks[4] = {0x4B000008, 0x4B000080, 0x4B000800, 0x4B008000};

    float const scales[4] = {1.f, 1.f / 16.f, 1.f / 256.f, 1.f / 4096.f};
    float const offsets[4] = {-8388616.f, -524296.f, -32776.f, -2056.f};

    static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa;

    uint32_t* result_as_int = reinterpret_cast<uint32_t*>(&result);

    // For each nibble, computes:
    // result_as_int[i] = (src_reg & and_mask) ^ xor_mask
    CUTLASS_PRAGMA_UNROLL
    for (int ii = 0; ii < elements_to_convert; ++ii) {
      asm volatile(
          "{\n"
          " lop3.b32 %0, %1, %2, %3, %4;\n"
          "}\n"
          : "=r"(result_as_int[offset + ii])
          : "r"(src_reg), "r"(and_masks[ii]), "r"(xor_masks[ii]), "n"(immLut));

      result[offset + ii] = __fmaf_rn(result[offset + ii], scales[ii], offsets[ii]);
    }
  }

  // The core converter uses bit tricks to construct a known FP32 number, then does a
  // subtraction in FP32 for the final result.
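  // Worked example (added for illustration), for the lowest nibble holding int4 value -3 (bits 0xD):
  //   (src & 0x0000000F) ^ 0x4B000008 = 0x4B000005, which is the fp32 bit pattern of 8388608 + 5
  //   (the XOR both installs the 0x4B000000 exponent field and adds 8 to the two's-complement nibble).
  //   __fmaf_rn(8388613.0f, 1.0f, -8388616.0f) = -3.0f, recovering the original value.
  //   Higher nibbles use the same idea with scales 1/16, 1/256, 1/4096 and matching offsets.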
  template <typename PackedResultType, typename PackedSrcType>
  CUTLASS_DEVICE
  static PackedResultType packed_convert(PackedSrcType const &source) {

    static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
                   platform::is_same<PackedResultType, result_type_packed_2>::value) ||
                  (platform::is_same<PackedSrcType, source_type_packed_4>::value &&
                   platform::is_same<PackedResultType, result_type_packed_4>::value) ||
                  (platform::is_same<PackedSrcType, source_type_packed_8>::value &&
                   platform::is_same<PackedResultType, result_type_packed_8>::value),
                  "Invalid PackedSrcType/PackedResultType must be 2, 4 or 8 to use private convert dispatch.");

    // The results are FP32, so they are written directly into the output array.
    PackedResultType r;

    // View the input as reg
    uint32_t src_reg = to_reg(source);

    constexpr int total_elements = PackedResultType::kElements == 8 ? 4 : PackedResultType::kElements;
    packed_convert_vec<0, total_elements>(r, src_reg);

    if (PackedResultType::kElements == 8) {
      uint32_t src_reg_shifted = src_reg >> 16;
      packed_convert_vec<4, 4>(r, src_reg_shifted);
    }
    return r;
  }

  friend class detail::VectorizedConverter;

public:
  CUTLASS_DEVICE
  static result_type convert(source_type const &source) {
    result_type result;
    using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
    detail::VectorizedConverter::convert<ConverterType,
                                         result_type_packed_8, source_type_packed_8,
                                         result_type_packed_4, source_type_packed_4,
                                         result_type_packed_2, source_type_packed_2>(result, source);

    return result;
  }

  CUTLASS_DEVICE
  result_type operator()(source_type const &s) const {
    return convert(s);
  }
};

/// Partial specialization for Array<float, N> <= Array<int8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<float, int8_t, N, Round> {
  using result_type = Array<float, N>;
  using source_type = Array<int8_t, N>;

  static FloatRoundStyle const round_style = Round;

private:
  using result_type_packed_4 = Array<float, 4>;
  using result_type_packed_2 = Array<float, 2>;
  using source_type_packed_4 = Array<int8_t, 4>;
  using source_type_packed_2 = Array<int8_t, 2>;

  using ScalarConverter = NumericConverter<float, int8_t, Round>;

  CUTLASS_DEVICE
  static uint32_t to_reg(source_type_packed_2 const& source) {
    return static_cast<uint32_t>(
      reinterpret_cast<const uint16_t&>(source));
  }

  CUTLASS_DEVICE
  static uint32_t to_reg(source_type_packed_4 const& source) {
    return reinterpret_cast<const uint32_t&>(source);
  }

  template <typename PackedResultType, typename PackedSrcType>
  CUTLASS_DEVICE
  static PackedResultType packed_convert(PackedSrcType const &source) {

    static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
                   platform::is_same<PackedResultType, result_type_packed_2>::value) ||
                  (platform::is_same<PackedSrcType, source_type_packed_4>::value &&
                   platform::is_same<PackedResultType, result_type_packed_4>::value),
                  "Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");

    PackedResultType r;

    // View the input as reg
    uint32_t src_reg = to_reg(source);
    static constexpr int fp32_base = 0x4B400000;
    uint32_t const prmt_indices[4] = {0x8880, 0x9991, 0xAAA2, 0xBBB3};

    int* result_as_int = reinterpret_cast<int*>(&r);

    CUTLASS_PRAGMA_UNROLL
    for (int ii = 0; ii < PackedResultType::kElements; ++ii) {
      asm volatile("prmt.b32 %0,%1,%1,%2;\n" : "=r"(result_as_int[ii]) : "r"(src_reg), "r"(prmt_indices[ii]));
    }

    CUTLASS_PRAGMA_UNROLL
    for (int ii = 0; ii < PackedResultType::kElements; ++ii) {
      result_as_int[ii] += fp32_base;
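      // Descriptive note (added for clarity): the prmt selectors above (0x8880, 0x9991, 0xAAA2, 0xBBB3)
      // each copy one source byte into byte 0 and replicate its sign bit into bytes 1-3, producing a
      // sign-extended 32-bit integer. Adding 0x4B400000 (the bit pattern of 12582912.0f) yields the fp32
      // bit pattern of 12582912 + x, so the floating-point subtraction below recovers x exactly.
      // E.g. x = -3: 0xFFFFFFFD + 0x4B400000 = 0x4B3FFFFD = 12582909.0f, and 12582909 - 12582912 = -3.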
r[ii] -= reinterpret_cast<const float&>(fp32_base); } return r; } friend class detail::VectorizedConverter; public: CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>; detail::VectorizedConverter::convert<ConverterType, result_type_packed_4, source_type_packed_4, result_type_packed_2, source_type_packed_2>(result, source); return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float, N> <= Array<uint8_t, N> template <FloatRoundStyle Round, int N> struct NumericArrayConverter<float, uint8_t, N, Round> { using result_type = Array<float, N>; using source_type = Array<uint8_t, N>; static FloatRoundStyle const round_style = Round; private: using result_type_packed_4 = Array<float, 4>; using result_type_packed_2 = Array<float, 2>; using source_type_packed_4 = Array<uint8_t, 4>; using source_type_packed_2 = Array<uint8_t, 2>; using ScalarConverter = NumericConverter<float, uint8_t, Round>; CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_2 const& source) { return static_cast<uint32_t>( reinterpret_cast<const uint16_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_4 const& source) { return reinterpret_cast<const uint32_t&>(source); } template <typename PackedResultType, typename PackedSrcType> CUTLASS_DEVICE static PackedResultType packed_convert(PackedSrcType const &source) { static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value && platform::is_same<PackedResultType, result_type_packed_2>::value) || (platform::is_same<PackedSrcType, source_type_packed_4>::value && platform::is_same<PackedResultType, result_type_packed_4>::value), "Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch."); PackedResultType r; // View the input as reg uint32_t src_reg = to_reg(source); // __byte_perm simulates the add.u32 0x4B000000 to every u8 element of u8x4 source and stores // the result in r (without introducing extra cvt.u32.u8 instruction) uint32_t const prmt_indices[4] = {0x7650, 0x7651, 0x7652, 0x7653}; uint32_t* result_as_int = reinterpret_cast<uint32_t*>(&r); for (int ii = 0; ii < PackedResultType::kElements; ++ii) { result_as_int[ii] = __byte_perm(src_reg, 0x4B000000, prmt_indices[ii]); // Subtract the magic number 0x4B000000 from tmp in floating-point arithmetic to obtain final result r[ii] -= 8388608.f; } return r; } friend class detail::VectorizedConverter; public: CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>; detail::VectorizedConverter::convert<ConverterType, result_type_packed_4, source_type_packed_4, result_type_packed_2, source_type_packed_2>(result, source); return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<cutlass::half_t, N> <= Array<cutlass::int4b_t, N> template <FloatRoundStyle Round, int N> struct NumericArrayConverter<cutlass::half_t, cutlass::int4b_t, N, Round> { using result_type = Array<cutlass::half_t, N>; using source_type = Array<cutlass::int4b_t, N>; static FloatRoundStyle const round_style = Round; 
private: using result_type_packed_8 = Array<cutlass::half_t, 8>; using result_type_packed_4 = Array<cutlass::half_t, 4>; using result_type_packed_2 = Array<cutlass::half_t, 2>; using source_type_packed_8 = Array<cutlass::int4b_t, 8>; using source_type_packed_4 = Array<cutlass::int4b_t, 4>; using source_type_packed_2 = Array<cutlass::int4b_t, 2>; using ScalarConverter = NumericConverter<cutlass::half_t, cutlass::int4b_t, Round>; CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_2 const& source) { return static_cast<uint32_t>( reinterpret_cast<const uint8_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_4 const& source) { return static_cast<uint32_t>( reinterpret_cast<const uint16_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_8 const& source) { return reinterpret_cast<const uint32_t&>(source); } // The core converter uses bit tricks to construct a known FP16 number, then does a // subtraction in FP16 for the final result. template <typename PackedResultType, typename PackedSrcType> CUTLASS_DEVICE static PackedResultType packed_convert(PackedSrcType const &source) { static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value && platform::is_same<PackedResultType, result_type_packed_2>::value) || (platform::is_same<PackedSrcType, source_type_packed_4>::value && platform::is_same<PackedResultType, result_type_packed_4>::value) || (platform::is_same<PackedSrcType, source_type_packed_8>::value && platform::is_same<PackedResultType, result_type_packed_8>::value), "Invalid PackedSrcType/PackedResultType must be 2, 4 or 8 to use private convert dispatch."); // Hold output FP16s in reg. We need 1 reg for every 2 elements using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>; RegArray r; // View the input as reg uint32_t src_reg = to_reg(source); // Below constructs the following temporary: // fp16s_01 = {0x00, i4_01, 0x00, i4_01} // fp16s_23 = {0x00, i4_23, 0x00, i4_23} // fp16s_45 = {0x00, i4_45, 0x00, i4_45} // fp16s_67 = {0x00, i4_67, 0x00, i4_67} // We use inline asm instead of __byte_perm intrinsic since we don't want the documented (& 0x7) on the index. NVCC // might be able to optimize it out since the index is a constexpr, but we choose to be safe about it here. uint32_t prmt_indices[4] = {0x4040, 0x4141, 0x4242, 0x4343}; static_assert(RegArray::kElements <= 4, "Too many inputs for F16 -> I4 vector converter"); CUTLASS_PRAGMA_UNROLL for (int ii = 0; ii < RegArray::kElements; ++ii) { asm volatile( "{\n" " prmt.b32 %0, %1, %2, %3;\n" "}\n" : "=r"(r[ii]) : "r"(src_reg), "n"(0), "r"(prmt_indices[ii])); } // The below XOR does the following: // 1) Sets the exponent bits of the FP16 to the correct value for the FP16 magic_num. We will be constructing // 1024 + x + 8 OR 1024 + 16 * (x + 8), then using hfma to subtract 1032 from that // 2) Adds 8 to the int4 value that we will process in the FP16 (for uint4, we can simply avoid this step) // The AND does the following: // 1) Clear the set bits for the int4 we will ignore. // We use lop3 so that we can use 1 instruction for AND and XOR. 
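// A worked example of the lop3 + hfma2 sequence that follows (the sample nibble x = -3 is
// illustrative; the constants are the ones defined below):
//   After the prmt above, the low FP16 lane of r[ii] holds the byte i4_01 in its low 8 bits,
//   e.g. 0x000D when the low nibble is x = -3 (two's-complement 0b1101 = 13).
//   lop3: (0x000D & 0xFF0F) ^ 0x6408 = 0x6405, which is the FP16 value 1024 + (x + 8) = 1029.
//   hfma2 with scale 1.0 and bias -1032 then yields 1029 - 1032 = -3, recovering x.
//   The high FP16 lane keeps the high nibble in bits 4-7, so it becomes 1024 + 16 * (x + 8) and
//   is instead scaled by 1/16 and biased by -72 (64 + x + 8 - 72 = x).
// The immLut value below encodes F(a, b, c) = (a & b) ^ c by evaluating F on 0xF0, 0xCC, 0xAA.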
static constexpr uint32_t xor_mask = 0x64806408; static constexpr uint32_t and_mask = 0xFFF0FF0F; static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa; // For each operand, computes: // r[i] = (r[i] & and_mask) ^ xor_mask CUTLASS_PRAGMA_UNROLL for (int ii = 0; ii < RegArray::kElements; ++ii) { asm volatile( "{\n" " lop3.b32 %0, %0, %1, %2, %3;\n" "}\n" : "+r"(r[ii]) : "n"(and_mask), "n"(xor_mask), "n"(immLut)); } // We will issue 2 hfmas that do the following: // For the high FP16: // Divide by 16 {packed as a operand} to get: // 64 + (x + 8) // x + 72 // Subtract 72 {packed as c operand} to get x // For the low FP16: // 1024 + (x + 8) // x + 1032 // So, we subtract 1032 {packed as c operand} to get x // {-72, -1032} static constexpr uint32_t hfma_bias_rep = 0xD480E408; // {1 / 16, 1} static constexpr uint32_t hfma_scale_rep = 0x2C003C00; const half2& hfma_bias = reinterpret_cast<const half2&>(hfma_bias_rep); const half2& hfma_scale = reinterpret_cast<const half2&>(hfma_scale_rep); // Scale and subtract the FP16s to get the original int4 number as FP16. CUTLASS_PRAGMA_UNROLL for (int ii = 0; ii < RegArray::kElements; ++ii) { half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]); fp16x2_val = __hfma2(hfma_scale, fp16x2_val, hfma_bias); } return reinterpret_cast<PackedResultType&>(r); } friend class detail::VectorizedConverter; public: CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>; detail::VectorizedConverter::convert<ConverterType, result_type_packed_8, source_type_packed_8, result_type_packed_4, source_type_packed_4, result_type_packed_2, source_type_packed_2>(result, source); return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<cutlass::half_t, N> <= Array<int8_t, N> template <FloatRoundStyle Round, int N> struct NumericArrayConverter<cutlass::half_t, int8_t, N, Round> { using result_type = Array<cutlass::half_t, N>; using source_type = Array<int8_t, N>; static FloatRoundStyle const round_style = Round; private: using result_type_packed_4 = Array<cutlass::half_t, 4>; using result_type_packed_2 = Array<cutlass::half_t, 2>; using source_type_packed_4 = Array<int8_t, 4>; using source_type_packed_2 = Array<int8_t, 2>; using ScalarConverter = NumericConverter<cutlass::half_t, int8_t, Round>; CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_2 const& source) { return static_cast<uint32_t>( reinterpret_cast<const uint16_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_4 const& source) { return reinterpret_cast<const uint32_t&>(source); } // The core converter uses bit tricks to construct a known FP16 number, then does a // subtraction in FP16 for the final result. template <typename PackedResultType, typename PackedSrcType> CUTLASS_DEVICE static PackedResultType packed_convert(PackedSrcType const &source) { static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value && platform::is_same<PackedResultType, result_type_packed_2>::value) || (platform::is_same<PackedSrcType, source_type_packed_4>::value && platform::is_same<PackedResultType, result_type_packed_4>::value), "Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch."); // Hold output FP16s in reg. 
We need 1 reg for every 2 elements using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>; RegArray r; #if 0 // Scalar conversion (Please keep this code for reference for vectorized version below) auto result = reinterpret_cast<PackedResultType&>(r); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < PackedResultType::kElements; ++i) { int16_t tmp = source[i] + 26112 /* 0x6600 */; result[i] = reinterpret_cast<cutlass::half_t const &>(tmp) - 1536.0_hf; } #endif // View the input as reg uint32_t src_reg = to_reg(source); uint32_t const prmt_indices[2] = {0x9180, 0xB3A2}; // Pack s8x2 (s8[1], s8[0]) -> s16x2 (sext.s8[1], sext.s8[0]) // (See https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt) // The inline ptx below uses `msb=0` and `msb=1` from the above link to sign-extend the sign bit in 0, 1, 2, 3 bytes of s8x4 // into result_ptr[0] and result_ptr[1]'s 08-15 and 24-31 bits, respectively. // Note that `__byte_perm(source_ptr[0], source_ptr[0], 0x9180);` won't achieve the same result and doesn't sign-extend the sign bit. // Thus, we use inline ptx `prmt.b32` instruction for the desired sign extend from s8x2 to s16x2. for (int ii = 0; ii < RegArray::kElements; ++ii) { asm volatile("prmt.b32 %0,%1,%1,%2;\n" : "=r"(r[ii]) : "r"(src_reg), "r"(prmt_indices[ii])); } // In the absense of add.s16x2 instruction, use bit-wise operation to execute signed addition with magic numbers to achieve // the same result as add.s16x2 instruction. // (See https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#logic-and-shift-instructions-lop3) // For a logical operation F(a, b, c) the value of kImmLut can be computed by applying the same operation to // three predefined constant values as follows: // ta = 0xF0; // tb = 0xCC; // tc = 0xAA; // kImmLut = F(ta, tb, tc); // If we want F = ((a & b) ^ c) then set kImmLut = (0xF0 & 0xCC) ^ 0xAA static constexpr uint32_t kImmLut = (0xF0 & 0xCC) ^ 0xAA; for (int ii = 0; ii < RegArray::kElements; ++ii) { // The bit-wise operation executed below is `r[ii] = (r[ii] & 0x03FF03FF) ^ 0x66006600;` asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" : "=r"(r[ii]) : "r"(r[ii]), "n"(0x03FF03FF), "n"(0x66006600), "n"(kImmLut)); } static constexpr uint32_t bias_rep = 0x66006600; const half2& bias = reinterpret_cast<const half2&>(bias_rep); CUTLASS_PRAGMA_UNROLL for (int ii = 0; ii < RegArray::kElements; ++ii) { half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]); fp16x2_val = __hsub2(fp16x2_val, bias); } return reinterpret_cast<PackedResultType&>(r); } friend class detail::VectorizedConverter; public: CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>; detail::VectorizedConverter::convert<ConverterType, result_type_packed_4, source_type_packed_4, result_type_packed_2, source_type_packed_2>(result, source); return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<cutlass::half_t, N> <= Array<uint8_t, N> template <FloatRoundStyle Round, int N> struct NumericArrayConverter<cutlass::half_t, uint8_t, N, Round> { using result_type = Array<cutlass::half_t, N>; using source_type = Array<uint8_t, N>; static FloatRoundStyle const round_style = Round; private: using result_type_packed_4 = Array<cutlass::half_t, 4>; using result_type_packed_2 = 
Array<cutlass::half_t, 2>; using source_type_packed_4 = Array<uint8_t, 4>; using source_type_packed_2 = Array<uint8_t, 2>; using ScalarConverter = NumericConverter<cutlass::half_t, uint8_t, Round>; CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_2 const& source) { return static_cast<uint32_t>( reinterpret_cast<const uint16_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_4 const& source) { return reinterpret_cast<const uint32_t&>(source); } template <typename PackedResultType, typename PackedSrcType> CUTLASS_DEVICE static PackedResultType packed_convert(PackedSrcType const &source) { static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value && platform::is_same<PackedResultType, result_type_packed_2>::value) || (platform::is_same<PackedSrcType, source_type_packed_4>::value && platform::is_same<PackedResultType, result_type_packed_4>::value), "Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch."); // Hold output FP16s in reg. We need 1 reg for every 2 elements using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>; RegArray r; // View the input as reg uint32_t src_reg = to_reg(source); uint32_t const prmt_indices[2] = {0x5150, 0x5352}; static constexpr uint32_t start_byte_for_fp16 = 0x64646464; for (int ii = 0; ii < RegArray::kElements; ++ii) { asm volatile("prmt.b32 %0,%1,%2,%3;\n" : "=r"(r[ii]) : "r"(src_reg), "n"(start_byte_for_fp16), "r"(prmt_indices[ii])); } static constexpr uint32_t bias_rep = 0x64006400; const half2& bias = reinterpret_cast<const half2&>(bias_rep); CUTLASS_PRAGMA_UNROLL for (int ii = 0; ii < RegArray::kElements; ++ii) { half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]); fp16x2_val = __hsub2(fp16x2_val, bias); } return reinterpret_cast<PackedResultType&>(r); } friend class detail::VectorizedConverter; public: CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>; detail::VectorizedConverter::convert<ConverterType, result_type_packed_4, source_type_packed_4, result_type_packed_2, source_type_packed_2>(result, source); return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<cutlass::int4b_t, N> template <FloatRoundStyle Round, int N> struct NumericArrayConverter<cutlass::bfloat16_t, cutlass::int4b_t, N, Round> { using result_type = Array<cutlass::bfloat16_t, N>; using source_type = Array<cutlass::int4b_t, N>; static FloatRoundStyle const round_style = Round; private: using result_type_packed_8 = Array<cutlass::bfloat16_t, 8>; using result_type_packed_4 = Array<cutlass::bfloat16_t, 4>; using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>; using source_type_packed_8 = Array<cutlass::int4b_t, 8>; using source_type_packed_4 = Array<cutlass::int4b_t, 4>; using source_type_packed_2 = Array<cutlass::int4b_t, 2>; using ScalarConverter = NumericConverter<cutlass::bfloat16_t, cutlass::int4b_t, Round>; CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_2 const& source) { return static_cast<uint32_t>( reinterpret_cast<const uint8_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_4 const& 
source) { return static_cast<uint32_t>( reinterpret_cast<const uint16_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_8 const& source) { return reinterpret_cast<const uint32_t&>(source); } // The core converter uses bit tricks to construct a known FP16 number, then does a // subtraction in FP16 for the final result. template <typename PackedResultType, typename PackedSrcType> CUTLASS_DEVICE static PackedResultType packed_convert(PackedSrcType const &source) { static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value && platform::is_same<PackedResultType, result_type_packed_2>::value) || (platform::is_same<PackedSrcType, source_type_packed_4>::value && platform::is_same<PackedResultType, result_type_packed_4>::value) || (platform::is_same<PackedSrcType, source_type_packed_8>::value && platform::is_same<PackedResultType, result_type_packed_8>::value), "Invalid PackedSrcType/PackedResultType must be 2, 4 or 8 to use private convert dispatch."); // Hold output FP16s in reg. We need 1 reg for every 2 elements using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>; RegArray r; // View the input as reg uint32_t src_reg = to_reg(source); uint32_t src_reg_shifted = src_reg >> 4; // Below constructs the following temporary: uint32_t const prmt_indices[4] = {0xF4F0, 0xF5F1, 0xF6F2, 0xF7F3}; static_assert(RegArray::kElements <= 4, "Too many inputs for BF16 -> I4 vector converter"); CUTLASS_PRAGMA_UNROLL for (int ii = 0; ii < RegArray::kElements; ++ii) { asm volatile( "{\n" " prmt.b32 %0, %1, %2, %3;\n" "}\n" : "=r"(r[ii]) : "r"(src_reg), "r"(src_reg_shifted), "r"(prmt_indices[ii])); } // The below XOR does the following: // 1) Sets the exponent bits of the FP16 to the correct value for the FP16 magic_num. We will be constructing // 128 + (x + 8) and subtracting 136 to get x static constexpr uint32_t xor_mask = 0x43084308; static constexpr uint32_t and_mask = 0x000F000F; static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa; // For each operand, computes: // r[i] = (r[i] & and_mask) ^ xor_mask CUTLASS_PRAGMA_UNROLL for (int ii = 0; ii < RegArray::kElements; ++ii) { asm volatile( "{\n" " lop3.b32 %0, %0, %1, %2, %3;\n" "}\n" : "+r"(r[ii]) : "n"(and_mask), "n"(xor_mask), "n"(immLut)); } // We will issue 2 bfmas that do the following: // high BF16: // hi_bf16 - 136, lo_bf16 - 136 // This is the BF16 {136, 136} represented as an integer. 
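// A worked example of the masking and subtraction steps below (the sample nibble x = -3 is
// illustrative; the constants are the ones defined in this function):
//   The prmt above already places each nibble in the low 4 bits of its BF16 lane (it reads from
//   src_reg and src_reg >> 4), so both lanes use the same constants.
//   lop3: (0x000D & 0x000F) ^ 0x4308 = 0x4305, which is the BF16 value 128 + (x + 8) = 133.
//   The bias 0x4308 is 136.0 in BF16, so __hsub2 computes 133 - 136 = -3, recovering x.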
static constexpr uint32_t bias_rep = 0x43084308; const __nv_bfloat162& bias = reinterpret_cast<const __nv_bfloat162&>(bias_rep); CUTLASS_PRAGMA_UNROLL for (int ii = 0; ii < RegArray::kElements; ++ii) { __nv_bfloat162& bf16x2_val = reinterpret_cast<__nv_bfloat162&>(r[ii]); bf16x2_val = __hsub2(bf16x2_val, bias); } return reinterpret_cast<PackedResultType&>(r); } friend class detail::VectorizedConverter; public: CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>; detail::VectorizedConverter::convert<ConverterType, result_type_packed_8, source_type_packed_8, result_type_packed_4, source_type_packed_4, result_type_packed_2, source_type_packed_2>(result, source); return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<int8_t, N> template <FloatRoundStyle Round, int N> struct NumericArrayConverter<cutlass::bfloat16_t, int8_t, N, Round> { using result_type = Array<cutlass::bfloat16_t, N>; using source_type = Array<int8_t, N>; static FloatRoundStyle const round_style = Round; private: using result_type_packed_4 = Array<cutlass::bfloat16_t, 4>; using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>; using source_type_packed_4 = Array<int8_t, 4>; using source_type_packed_2 = Array<int8_t, 2>; using ScalarConverter = NumericConverter<cutlass::bfloat16_t, int8_t, Round>; CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_2 const& source) { return static_cast<uint32_t>( reinterpret_cast<const uint16_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_4 const& source) { return reinterpret_cast<const uint32_t&>(source); } template <typename PackedResultType, typename PackedSrcType> CUTLASS_DEVICE static PackedResultType packed_convert(PackedSrcType const &source) { static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value && platform::is_same<PackedResultType, result_type_packed_2>::value) || (platform::is_same<PackedSrcType, source_type_packed_4>::value && platform::is_same<PackedResultType, result_type_packed_4>::value), "Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch."); NumericArrayConverter<float, int8_t, PackedResultType::kElements, Round> convert_int8_to_f32; Array<float, PackedResultType::kElements> tmp = convert_int8_to_f32(source); NumericArrayConverter<cutlass::bfloat16_t, float, PackedResultType::kElements, Round> convert_f32_to_bf16; return convert_f32_to_bf16(tmp); } friend class detail::VectorizedConverter; public: CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>; detail::VectorizedConverter::convert<ConverterType, result_type_packed_4, source_type_packed_4, result_type_packed_2, source_type_packed_2>(result, source); return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<uint8_t, N> template <FloatRoundStyle Round, int N> struct NumericArrayConverter<cutlass::bfloat16_t, uint8_t, N, Round> { using result_type = Array<cutlass::bfloat16_t, N>; using source_type = Array<uint8_t, N>; static FloatRoundStyle const round_style = Round; private: using 
result_type_packed_4 = Array<cutlass::bfloat16_t, 4>; using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>; using source_type_packed_4 = Array<uint8_t, 4>; using source_type_packed_2 = Array<uint8_t, 2>; using ScalarConverter = NumericConverter<cutlass::bfloat16_t, uint8_t, Round>; CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_2 const& source) { return static_cast<uint32_t>( reinterpret_cast<const uint16_t&>(source)); } CUTLASS_DEVICE static uint32_t to_reg(source_type_packed_4 const& source) { return reinterpret_cast<const uint32_t&>(source); } template <typename PackedResultType, typename PackedSrcType> CUTLASS_DEVICE static PackedResultType packed_convert(PackedSrcType const &source) { static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value && platform::is_same<PackedResultType, result_type_packed_2>::value) || (platform::is_same<PackedSrcType, source_type_packed_4>::value && platform::is_same<PackedResultType, result_type_packed_4>::value), "Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch."); NumericArrayConverter<float, uint8_t, PackedResultType::kElements, Round> convert_uint8_to_f32; Array<float, PackedResultType::kElements> tmp = convert_uint8_to_f32(source); NumericArrayConverter<cutlass::bfloat16_t, float, PackedResultType::kElements, Round> convert_f32_to_bf16_; return convert_f32_to_bf16_(tmp); } friend class detail::VectorizedConverter; public: CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>; detail::VectorizedConverter::convert<ConverterType, result_type_packed_4, source_type_packed_4, result_type_packed_2, source_type_packed_2>(result, source); return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; #endif // defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) ///////////////////////////////////////////////////////////////////////////////////////////////// /// FastNumericArrayConverter only works when the source is within center range. /// Conversion operator for Array. See the comments before /// FastLinearCombinationClamp. 
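// A worked example of the "add a large magic float" trick used by the int -> float fast path
// below (the sample value 100 is illustrative only):
//   0x4B400000 reinterpreted as a float is 12582912.0f (1.5 * 2^23), where one mantissa ulp is 1.
//   Adding the integer 100 to that bit pattern gives 0x4B400064, i.e. the float 12583012.0f.
//   Subtracting 12582912.0f recovers exactly 100.0f.
// This is the sense in which the source must stay "within center range": the integer must be
// small enough (roughly |x| < 2^22) that the addition only touches the mantissa and leaves the
// exponent unchanged.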
template <typename T, typename S, int N, FloatRoundStyle Round = FloatRoundStyle::round_to_nearest, typename Enable = void> struct FastNumericArrayConverter { using result_type = Array<T, N>; using source_type = Array<S, N>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const &s) { NumericArrayConverter<T, S, N, Round> convert_; return convert_(s); } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<float> <= Array<int> template <int N, FloatRoundStyle Round> struct FastNumericArrayConverter<float, int, N, Round> { using result_type = Array<float, N>; using source_type = Array<int, N>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const &source) { result_type result; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { int tmp = source[i] + 1262485504 /*0x4B400000*/; result[i] = reinterpret_cast<float const &>(tmp) - 12582912.0f; } return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<int8_t, 4> <= Array<float, 4> template <FloatRoundStyle Round> struct FastNumericArrayConverter<int8_t, float, 4, Round> { using result_type = Array<int8_t, 4>; using source_type = Array<float, 4>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const &source) { Array<int32_t, 4> result; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 4; ++i) { float tmp = source[i] + 12582912.0f; result[i] = reinterpret_cast<int32_t const &>(tmp); } result[0] = __byte_perm(result[0], result[1], 0x40); result[2] = __byte_perm(result[2], result[3], 0x40); result[0] = __byte_perm(result[0], result[2], 0x5410); return reinterpret_cast<result_type const &>(result[0]); } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; /// Partial specialization for Array<int8_t> <= Array<float> template <int N, FloatRoundStyle Round> struct FastNumericArrayConverter<int8_t, float, N, Round> { static_assert(!(N % 4), "N must be multiple of 4."); using result_type = Array<int8_t, N>; using source_type = Array<float, N>; static FloatRoundStyle const round_style = Round; CUTLASS_DEVICE static result_type convert(source_type const &source) { FastNumericArrayConverter<int8_t, float, 4, Round> convert_vector_; result_type result; Array<int8_t, 4> *result_ptr = reinterpret_cast<Array<int8_t, 4> *>(&result); Array<float, 4> const *source_ptr = reinterpret_cast<Array<float, 4> const *>(&source); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 4; ++i) { result_ptr[i] = convert_vector_(source_ptr[i]); } return result; } CUTLASS_DEVICE result_type operator()(source_type const &s) const { return convert(s); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines preferred rounding mode for a pair of types template <typename T, typename S> struct PreferredRoundingMode { static FloatRoundStyle const kRound = FloatRoundStyle::round_to_nearest; }; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 900 /// Defines preferred rounding mode for a pair of types template <> struct PreferredRoundingMode<cutlass::tfloat32_t, float> { static FloatRoundStyle const kRound = FloatRoundStyle::round_half_ulp_truncate; }; #endif ///////////////////////////////////////////////////////////////////////////////////////////////// /// Packs predicates into an 
array.
template <int N>
struct PackPredicates {
  using result_type = Array<uint1b_t, N>;

  static_assert(!(N % 4), "Must pack predicates in a count that is a multiple of 4");

  CUTLASS_HOST_DEVICE
  result_type operator()(bool const predicates[]) {
    result_type packed;
    packed.clear();

    int const kWordSize = 8;
    uint8_t *bytes = reinterpret_cast<uint8_t *>(packed.data());

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      int word_idx = (i / kWordSize);
      int bit_idx = (i % kWordSize);

      uint8_t mask = static_cast<uint8_t>((predicates[i] ? 1u : 0u) << bit_idx);
      bytes[word_idx] = (bytes[word_idx] | mask);
    }
    return packed;
  }
};

/// Unpacks predicates from an array
template <int N>
struct UnpackPredicates {
  using result_type = Array<uint1b_t, N>;

  static_assert(!(N % 4), "Must unpack predicates in a count that is a multiple of 4");

  CUTLASS_HOST_DEVICE
  void operator()(bool predicates[], result_type const &packed) {
    int const kWordSize = 8;
    uint8_t const *bytes = reinterpret_cast<uint8_t const *>(packed.data());

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      int word_idx = (i / kWordSize);
      int bit_idx = (i % kWordSize);

      predicates[i] = bool((bytes[word_idx] >> bit_idx) & 0x1);
    }
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace cutlass

/////////////////////////////////////////////////////////////////////////////////////////////////
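// A minimal usage sketch for the converters defined in this header (illustrative only; the
// kernel name, vector width, and element types are assumptions, not part of the library):
//
//   #include "cutlass/numeric_conversion.h"
//
//   __global__ void example_convert_kernel(
//       cutlass::Array<cutlass::half_t, 8> *dst,
//       cutlass::Array<int8_t, 8> const *src) {
//     cutlass::NumericArrayConverter<cutlass::half_t, int8_t, 8> converter;
//     dst[threadIdx.x] = converter(src[threadIdx.x]);
//   }
//
// The same pattern applies to the other partial specializations above; only the element types
// and the vector width change.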
cutlass/include/cutlass/numeric_conversion.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Kernel performing a reduction over one or more ranks of an affine tensor */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/fast_math.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/device_kernel.h" #include "cutlass/reduction/thread/reduction_operators.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace reduction { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure template < int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) int ReducedRank, ///< Rank of reduced tensor (i.e. 
number of outer ranks) typename ElementOutput, ///< Data type of output tensor typename ElementSource, ///< Data type of source tensor typename ReductionOp, ///< Reduction operator int VectorLength = 1, ///< Vector length for memory typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation int Threads = 256, ///< Number of participating threads int BatchSize = 4 ///< Number of elements to load per batch > struct TensorReductionAffineContiguousParams { static int const kRank = Rank; static int const kReducedRank = ReducedRank; static int const kVectorLength = VectorLength; static int const kInnerRank = kRank - kReducedRank; static int const kThreads = Threads; static int const kBatchSize = BatchSize; Coord<kRank> extent; /// Extent of source tensor FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank int64_t dst_stride[kReducedRank]; /// stride (units of bytes) - I, J int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K int64_t workspace_stride; /// stride (units of bytes) between workspace int workspace_count; /// number of workspaces uint64_t inner_count; /// Number of elements in reduced index space uint64_t outer_count; /// Number of elements in outer index space ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank ElementSource const * source; /// Pointer to source pointer of rank kRank ReductionOp reduction_op; /// Reduction operator ElementCompute reduction_identity; /// Identity element used by reduction operator ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorReductionAffineContiguousParams() { } /// Ctor TensorReductionAffineContiguousParams( Coord<kRank> extent_, ///< Extent of source tensor ElementOutput * dst_ptr_, ///< Output tensor data int64_t dst_stride_[], ///< Stride (units of elements) ElementSource const * src_ptr_, ///< Source tensor data int64_t src_stride_[], ///< Stride (units of elements) ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions int64_t workspace_stride_, ///< Stride between workspaces int workspace_count_, ///< Number of workspaces ReductionOp reduction_op_, ///< Reduction operator ElementCompute reduction_identity_ = ElementCompute() ///< Identity element used by reduction operator ): extent(extent_), inner_count(1), outer_count(1), destination(dst_ptr_), source(src_ptr_), device_workspace(device_workspace_), workspace_stride(workspace_stride_), workspace_count(workspace_count_), reduction_op(reduction_op_), reduction_identity(reduction_identity_) { // Initialize divisors for fast div-mod for (int p = 1; p < kRank; ++p) { divmod[p - 1] = FastDivmodU64(uint64_t(extent[p])); } int input_size_bits = sizeof_bits<ElementSource>::value; int output_size_bits = sizeof_bits<ElementOutput>::value; // Compute strides in units of bytes for (int p = 0; p < kReducedRank; ++p) { dst_stride[p] = dst_stride_[p] * output_size_bits / 8; } for (int p = 0; p < kRank - 1; ++p) { src_stride[p] = src_stride_[p] * input_size_bits / 8; } // Compute number of elements in strided ranks for (int p = 0; p < kReducedRank; ++p) { outer_count *= uint64_t(extent[p]); } for (int p = 0; p < kInnerRank; ++p) { inner_count *= uint64_t(extent[kRank - 1 - p]); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Kernel to reduce a tensor with affine layout over a set of ranks *INCLUDING* the 
contiguous /// rank. This leads to favorable vectorized memory accesses over the contiguous rank. template < int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2) typename ElementOutput, ///< Data type of output tensor typename ElementSource, ///< Data type of source tensor typename ReductionOp, ///< Reduction operator int VectorLength = 1, ///< Vector length for memory typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation int Threads = 256, ///< Number of participating threads int BatchSize = 4 ///< Number of elements to load per batch > class TensorReductionAffineContiguous { public: static int const kRank = Rank; static int const kReducedRank = ReducedRank; static int const kVectorLength = VectorLength; static int const kInnerRank = kRank - kReducedRank; static int const kThreads = Threads; static int const kBatchSize = BatchSize; using ComputeFragment = Array<ElementCompute, VectorLength>; using SourceFragment = AlignedArray<ElementSource, VectorLength>; using OutputFragment = AlignedArray<ElementOutput, VectorLength>; /// Shared memory allocation used for reduction within the CTA struct SharedStorage { Array<ElementCompute, kThreads * kVectorLength> workspace; }; /// Parameters structure using Params = TensorReductionAffineContiguousParams< Rank, ReducedRank, ElementOutput, ElementSource, ReductionOp, VectorLength, ElementCompute, Threads, BatchSize >; private: /// Computes the coordinate and offset of a given linear index CUTLASS_DEVICE void compute_inner_coord_and_offset_( Params const &params, Coord<kInnerRank> & coord, int64_t &src_offset, uint64_t linear_idx) const { // Decompose into a coordinate of rank <kInnerRank> coord = CoordinateDecomposition<kInnerRank>(linear_idx, &params.divmod[kRank - kInnerRank]); // Compute an offset using the souce stride src_offset = 0; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kInnerRank - 1; ++i) { src_offset += coord[i] * params.src_stride[kReducedRank + i]; } src_offset += coord[kInnerRank - 1] * sizeof_bits<ElementSource>::value / 8; } /// Computes the coordinate and offset of a given linear index CUTLASS_DEVICE void compute_outer_coord_and_offset_( Params const &params, Coord<kReducedRank> & coord, int64_t &dst_offset, int64_t &src_offset, uint64_t linear_idx) const { // Decompose into coordinate of rank <kReducedRank> coord = CoordinateDecomposition<kReducedRank>(linear_idx, params.divmod); // Compute offsets using destination and source strides dst_offset = 0; src_offset = 0; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kReducedRank; ++i) { dst_offset += params.dst_stride[i] * coord[i]; src_offset += params.src_stride[i] * coord[i]; } } /// Reduces over the reduction indices yielding a single element CUTLASS_DEVICE ElementCompute reduce_indices_( Params const &params, ElementCompute *threadblock_workspace, char const *src_byte_ptr, int coord_c) { NumericArrayConverter<ElementCompute, ElementSource, VectorLength> convert_source; ReductionOp reduction_op(params.reduction_op); // // Early exit or initialize to identity element // if (!params.inner_count) { return params.reduction_identity; } ComputeFragment accumulator; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < int(accumulator.size()); ++i) { accumulator[i] = params.reduction_identity; } // Compute the coordinate of the first access int64_t src_byte_offset = 0; Coord<kInnerRank> coord; uint64_t linear_idx = (threadIdx.x + blockDim.x * threadIdx.z + blockDim.x * 
blockIdx.z * blockDim.z) * kVectorLength; compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx); // Load the first vector SourceFragment source_fragment[kBatchSize]; bool not_done = true; // Iterate over vectors in a linearized reduction index space while (not_done) { bool guards[kBatchSize]; // Issue a batch of loads CUTLASS_PRAGMA_UNROLL for (int b = 0; b < kBatchSize; ++b) { if (linear_idx < params.inner_count) { source_fragment[b] = *reinterpret_cast<SourceFragment const *>(src_byte_ptr + src_byte_offset); guards[b] = true; } else { guards[b] = false; not_done = false; } linear_idx += (blockDim.z * gridDim.z * blockDim.x) * kVectorLength; compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx); } // Perform a batch of reduction operations CUTLASS_PRAGMA_UNROLL for (int b = 0; b < kBatchSize; ++b) { if (guards[b]) { auto cvt = convert_source(source_fragment[b]); accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator( reduction_op, accumulator, cvt); } } }; // // Reduction of vectors to scalar // ElementCompute reduced_accumulator = accumulator[0]; CUTLASS_PRAGMA_UNROLL for (int i = 1; i < kVectorLength; ++i) { reduced_accumulator = reduction_op(reduced_accumulator, accumulator[i]); } // // Reduction within CTA across threadIdx.xz => threadIdx{.x = 0, .z = 0} // // This re-arranges data so threadIdx.y is effectively a row index and threadIdx.xz is a column // int thread_count = blockDim.x * blockDim.z; int thread_j = threadIdx.x + blockDim.x * threadIdx.z; int thread_i = threadIdx.y; ElementCompute *frag_ptr = reinterpret_cast<ElementCompute *>(threadblock_workspace) + thread_i * thread_count; frag_ptr[thread_j] = reduced_accumulator; // // Reduce // CUTLASS_PRAGMA_NO_UNROLL while (thread_count > 1) { thread_count /= 2; __syncthreads(); if (thread_j < thread_count) { ElementCompute other = frag_ptr[thread_j + thread_count]; reduced_accumulator = reduction_op(reduced_accumulator, other); frag_ptr[thread_j] = reduced_accumulator; } __syncthreads(); } return reduced_accumulator; } public: /// Perform a reduction CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength; char const * src_byte_ptr = reinterpret_cast<char const *>(params.source); char * dst_byte_ptr = nullptr; // If performing a reduction across CTAs, redirect output to device workspace if (gridDim.z == 1) { dst_byte_ptr = reinterpret_cast<char *>(params.destination); } else { dst_byte_ptr = reinterpret_cast<char *>(params.device_workspace); } uint64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y; // Use modulo division to compute location Coord<kReducedRank> outer_coord; int64_t dst_byte_offset; int64_t src_byte_offset; compute_outer_coord_and_offset_( params, outer_coord, dst_byte_offset, src_byte_offset, idx_linear); if (gridDim.z == 1) { /// Complete the reduction with no workspace while (idx_linear < params.outer_count) { ElementCompute result = reduce_indices_( params, shared_storage.workspace.data(), src_byte_ptr + src_byte_offset, coord_c); // Store the result after possible final reduction within the CTA if (threadIdx.z == 0 && threadIdx.x == 0) { // Convert to output type and store NumericConverter<ElementOutput, ElementCompute> convert_output; ElementOutput cvt = convert_output(result); *reinterpret_cast<ElementOutput *>(dst_byte_ptr + dst_byte_offset) = cvt; } __syncthreads(); // Update indices and pointers idx_linear += gridDim.y * blockDim.y; 
compute_outer_coord_and_offset_( params, outer_coord, dst_byte_offset, src_byte_offset, idx_linear); } // while } else { /// Complete the reduction with workspace while (idx_linear < params.outer_count) { ElementCompute result = reduce_indices_( params, shared_storage.workspace.data(), src_byte_ptr + src_byte_offset, coord_c); int64_t byte_offset = blockIdx.z * params.workspace_stride + idx_linear * sizeof_bits<ElementCompute>::value / 8; // Store the result for final reduction if (threadIdx.z == 0 && threadIdx.x == 0) { *reinterpret_cast<ElementCompute *>(dst_byte_ptr + byte_offset) = result; } __syncthreads(); // Update indices and pointers idx_linear += gridDim.y * blockDim.y; compute_outer_coord_and_offset_( params, outer_coord, dst_byte_offset, src_byte_offset, idx_linear); } // while } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Kernel to perform final reduction template < int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2) typename ElementOutput, ///< Data type of output tensor typename ElementSource, ///< Data type of source tensor typename ReductionOp, ///< Reduction operator int VectorLength = 1, ///< Vector length for memory typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation int Threads = 256, ///< Number of participating threads int BatchSize = 4 ///< Number of elements to load per batch > class TensorReductionAffineContiguousFinal { public: static int const kRank = Rank; static int const kReducedRank = ReducedRank; static int const kVectorLength = VectorLength; static int const kInnerRank = kRank - kReducedRank; static int const kThreads = Threads; static int const kBatchSize = BatchSize; /// Shared memory struct SharedStorage { }; /// Parameters structure using Params = TensorReductionAffineContiguousParams< Rank, ReducedRank, ElementOutput, ElementSource, ReductionOp, VectorLength, ElementCompute, Threads, BatchSize >; private: /// Computes the coordinate and offset of a given linear index CUTLASS_DEVICE void compute_outer_coord_and_offset_( Params const &params, Coord<kReducedRank> & coord, int64_t &dst_offset, uint64_t linear_idx) const { // Decompose into coordinate of rank <kReducedRank> coord = CoordinateDecomposition<kReducedRank>(linear_idx, params.divmod); // Compute offsets using destination and source strides dst_offset = 0; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kReducedRank; ++i) { dst_offset += params.dst_stride[i] * coord[i]; } } /// Reduces over the reduction indices CUTLASS_DEVICE ElementCompute reduce_indices_( Params const &params, ElementCompute const *device_workspace) { ReductionOp reduction_op(params.reduction_op); char const *src_byte_ptr = reinterpret_cast<char const *>(device_workspace); // Accumulated output ElementCompute accumulator = params.reduction_identity; for (int iter = 0; iter < params.workspace_count; ++iter) { ElementCompute workspace_item = *reinterpret_cast<ElementCompute const *>(src_byte_ptr); accumulator = reduction_op(accumulator, workspace_item); src_byte_ptr += params.workspace_stride; } return accumulator; } public: // // Methods // /// Perform a reduction CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { uint64_t idx_linear = blockIdx.x * blockDim.x + threadIdx.x; char * dst_byte_ptr = reinterpret_cast<char *>(params.destination); // Use modulo division to compute location Coord<kReducedRank> 
outer_coord; int64_t dst_byte_offset; compute_outer_coord_and_offset_( params, outer_coord, dst_byte_offset, idx_linear); /// Complete the reduction while (idx_linear < params.outer_count) { ElementCompute result = reduce_indices_(params, params.device_workspace + idx_linear); // Convert to output type and store NumericConverter<ElementOutput, ElementCompute> convert_output; *reinterpret_cast<ElementOutput *>(dst_byte_ptr + dst_byte_offset) = convert_output(result); // Update indices and pointers idx_linear += gridDim.x * blockDim.x; compute_outer_coord_and_offset_( params, outer_coord, dst_byte_offset, idx_linear); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace reduction } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
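// A minimal host-side sketch of wiring up the reduction kernel above (illustrative only: the
// element types, reduction functor, ranks, placeholder variables (extent, dst_ptr, ...), and
// launch configuration are assumptions; a device-level wrapper would normally choose them):
//
//   using Element     = float;
//   using ReductionOp = cutlass::plus<Element>;
//
//   // Reduce a rank-4 tensor over its two innermost (contiguous) ranks => rank-2 output.
//   using ReductionKernel = cutlass::reduction::kernel::TensorReductionAffineContiguous<
//       4, 2, Element, Element, ReductionOp>;
//
//   typename ReductionKernel::Params params(
//       extent,                        // Coord<4> extent of the source tensor
//       dst_ptr, dst_stride,           // destination pointer / strides (units of elements)
//       src_ptr, src_stride,           // source pointer / strides (units of elements)
//       workspace_ptr,                 // device workspace for inter-CTA reductions
//       workspace_stride, workspace_count,
//       ReductionOp(), Element());     // reduction operator and its identity
//
//   cutlass::Kernel<ReductionKernel><<<
//       grid, block, sizeof(typename ReductionKernel::SharedStorage), stream>>>(params);
//
// If the launch uses gridDim.z > 1, partial results are written to the workspace and
// TensorReductionAffineContiguousFinal must be launched afterwards to produce the final output.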
cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing how threads are mapped to a given tile. 
*/ #pragma once #include "cute/arch/mma_sm90_gmma.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace collective { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { using namespace cute; template <bool Transpose, class SmemLayoutAtom, class ElementType> constexpr auto gmma_smem_transpose_or_passthrough() { if constexpr (Transpose) { if constexpr (cute::is_same_v<GMMA::Layout_MN_SW128_Atom<ElementType>, SmemLayoutAtom>) { return GMMA::Layout_K_SW128_Atom<ElementType>{}; } else if constexpr (cute::is_same_v<GMMA::Layout_MN_SW64_Atom<ElementType>, SmemLayoutAtom>) { return GMMA::Layout_K_SW64_Atom<ElementType>{}; } else if constexpr (cute::is_same_v<GMMA::Layout_MN_SW32_Atom<ElementType>, SmemLayoutAtom>) { return GMMA::Layout_K_SW32_Atom<ElementType>{}; } else if constexpr (cute::is_same_v<GMMA::Layout_MN_INTER_Atom<ElementType>, SmemLayoutAtom>) { return GMMA::Layout_K_INTER_Atom<ElementType>{}; } else { static_assert(cutlass::detail::dependent_false<SmemLayoutAtom>, "Unsupported Layout_SW_Atom for B SMEM transposition"); } } else { return SmemLayoutAtom{}; } } template <class SmemCopyAtom, class ElementType> constexpr auto use_universal_transposition() { if constexpr (sizeof(ElementType) == 1) { return !cute::is_same_v<GMMA::Layout_MN_SW128_Atom<ElementType>, SmemCopyAtom>; } else if constexpr (sizeof(ElementType) == 4){ // Only universal transposition can handle SW64 and Non swizzle SMEM layout if constexpr (cute::is_same_v<GMMA::Layout_MN_SW64_Atom<ElementType>, SmemCopyAtom> || cute::is_same_v<GMMA::Layout_MN_INTER_Atom<ElementType>, SmemCopyAtom>) { return true; } else { return false; } } else { static_assert(cutlass::detail::dependent_false<ElementType>, "Unsupported ElementType for B SMEM transposition"); } } template< class TiledMma_, class SmemLayoutB_, class SmemLayoutAtomB_, class ElementB_> class NoTranspositionOperandB { public: using TiledMma = TiledMma_; using SmemLayoutB = SmemLayoutB_; using SmemLayoutAtomB = SmemLayoutAtomB_; using ElementB = ElementB_; constexpr CUTLASS_HOST_DEVICE NoTranspositionOperandB( int, int, TiledMma, SmemLayoutB, SmemLayoutAtomB, ElementB) { } template < class TensorSmemB, class TensorTransposedSmemB> CUTLASS_DEVICE void operator()( TensorSmemB const&, TensorTransposedSmemB const&, int, int) { } CUTLASS_DEVICE void synchronize(int) { } CUTLASS_DEVICE void synchronize() { } template < class TensorSmemB, class TensorTransposedSmemB> CUTLASS_DEVICE void transpose( TensorSmemB const&, TensorTransposedSmemB const&, int) { } }; template< class TiledMma_, class SmemLayoutB_, class SmemLayoutAtomB_, class ElementB_> class UniversalTranspositionOperandB { public: using TiledMma = TiledMma_; using SmemLayoutB = SmemLayoutB_; using SmemLayoutAtomB = SmemLayoutAtomB_; using ElementB = ElementB_; constexpr CUTLASS_HOST_DEVICE UniversalTranspositionOperandB( int warp_idx_, int warp_group_thread_idx_, TiledMma, SmemLayoutB, SmemLayoutAtomB, ElementB) : warp_idx(warp_idx_) , warp_group_thread_idx(warp_group_thread_idx_) { } template < class TensorSmemB, class TensorTransposedSmemB> CUTLASS_DEVICE void operator()( TensorSmemB const& sB, TensorTransposedSmemB const& gmma_sB, int read_stage, int current_step) { if (current_step > 0) { return; } constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup; static_assert(NumMathWarpGroup == 1 || 
(!detail::use_universal_transposition<SmemLayoutAtomB, ElementB>() && NumMathWarpGroup == 2), "Wrong math warp group number for TransposeB"); constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K. constexpr int BytesPerSmemSwizzleUnit = 16; constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB); ////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Universal transposition, need warp_group sync between load and store. /// The number of reg used depends on the input elementB. ////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* In one copy step, a warp group would load WarpgroupTileSize * WarpgroupTileSize tile then store to transposed location. In warp_group_tile, each warp holds Four WarpTileSize x WarpTileSize elements: K ------------ | W0 W1 W2 W3 --- | W0 W1 W2 W3 | | W0 W1 W2 W3 | --> Copy Step 0 | W0 W1 W2 W3 --- .... | W0 W1 W2 W3 --- | W0 W1 W2 W3 | | W0 W1 W2 W3 | --> Copy Step n | W0 W1 W2 W3 --- */ static_assert((NumThreadsPerWarpGroup % WarpThreadShapeN == 0), "Unsupported warp thread layout."); constexpr auto WarpgroupThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<NumThreadsPerWarpGroup / WarpThreadShapeN>{})); // Get copy tile and partition to each thread auto sB_tiled_copy = make_tiled_copy( Copy_Atom<DefaultCopy, ElementB>{}, WarpgroupThreadLayout, // thr_layout Layout<_1>{} // val_layout ); static_assert(size(sB_tiled_copy) == size(TiledMma{}), "Wrong thread number in TiledCopy."); auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx); Tensor tCsB = sB_thr_copy.partition_S( sB(_,_,read_stage)); // (CPY, CPY_N, CPY_K) Tensor tCsB_transposed = sB_thr_copy.partition_D(gmma_sB(_,_,read_stage)); // (CPY, CPY_N, CPY_K) // Divide partitioned tile to limit register usage constexpr int CopySteps = size<0>(SmemLayoutB{}) / WarpgroupTileSize; constexpr auto CopyTileShape = make_shape(size<0>(tCsB), Int< size<1>(tCsB) / CopySteps >{}, size<2>(tCsB)); static_assert(size<1>(tCsB) % CopySteps == 0, "CopySteps must evenly divide rank 1 size of partitioned SMEM."); Tensor tCsB_copy_tile = zipped_divide(tCsB, CopyTileShape); Tensor tCsB_copy_tile_transposed = zipped_divide(tCsB_transposed, CopyTileShape); auto transpose_fragment = make_fragment_like(tCsB_copy_tile(_,_0{})); CUTLASS_PRAGMA_NO_UNROLL for (int step = 0; step < CopySteps; ++step) { copy(sB_tiled_copy, tCsB_copy_tile(_,step), transpose_fragment); // Make sure all elements are read before being overwritten __syncthreads(); copy(sB_tiled_copy, transpose_fragment, tCsB_copy_tile_transposed(_,step)); } } CUTLASS_DEVICE void synchronize(int step) { if (step == 0) { // SMEM fence to make sure B is transposed before math cutlass::arch::fence_view_async_shared(); cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier); } } CUTLASS_DEVICE void synchronize() { // SMEM fence to make sure B is transposed before math cutlass::arch::fence_view_async_shared(); cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier); } template < class TensorSmemB, class TensorTransposedSmemB> CUTLASS_DEVICE void transpose( TensorSmemB const& sB, TensorTransposedSmemB const& gmma_sB, int read_stage) { this->operator()(sB, gmma_sB, read_stage, 0); synchronize(); } private: const int warp_idx; const int warp_group_thread_idx; }; 
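// Illustrative sketch of how the pieces above are typically combined to choose a transposition
// functor (hedged: the library's actual factory may differ in name and signature; this only
// summarizes the dispatch implied by use_universal_transposition and the classes in this file):
//
//   template <bool TransposeB, class TiledMma, class SmemLayoutB, class SmemLayoutAtomB, class ElementB>
//   constexpr CUTLASS_HOST_DEVICE auto
//   make_transpose_operand_b_sketch(int warp_idx, int warp_group_thread_idx) {
//     if constexpr (!TransposeB) {
//       return NoTranspositionOperandB(warp_idx, warp_group_thread_idx,
//                                      TiledMma{}, SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
//     }
//     else if constexpr (use_universal_transposition<SmemLayoutAtomB, ElementB>()) {
//       return UniversalTranspositionOperandB(warp_idx, warp_group_thread_idx,
//                                             TiledMma{}, SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
//     }
//     else {
//       return AsyncTranspositionOperandB(warp_idx, warp_group_thread_idx,
//                                         TiledMma{}, SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
//     }
//   }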
template< class TiledMma_, class SmemLayoutB_, class SmemLayoutAtomB_, class ElementB_> class AsyncTranspositionOperandB { public: using TiledMma = TiledMma_; using SmemLayoutB = SmemLayoutB_; using SmemLayoutAtomB = SmemLayoutAtomB_; using ElementB = ElementB_; static constexpr int Steps = 2; static constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup; static constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup; static_assert(NumMathWarpGroup <= 2, "Wrong math warp group number for TransposeB"); static constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K. static constexpr int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp; static constexpr int BytesPerSmemSwizzleUnit = 16; static constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB); static constexpr int WarpThreadShapeK = NumThreadsPerWarp / WarpThreadShapeN; static constexpr int NumWarpTilePerWarpgroupTile = NumWarpsPerWarpGroup * (Steps == 8 ? 2 : 1); static constexpr int WarpTileSize = WarpgroupTileSize / NumWarpTilePerWarpgroupTile; static_assert(WarpTileSize >= WarpThreadShapeN && WarpTileSize >= WarpThreadShapeK, "Invaild warp thread shape." ); static constexpr int TilesPerWarp = 2; // Each Warp would process 2 warp_tiles in one step. static constexpr int64_t WarpTileNCoordLUT = 06723763275316420; static constexpr int64_t WarpTileKCoordLUT = 05410541064206420; static constexpr int NumStepsEncoded = 4; // Only encoding first 4 steps into LUT. static constexpr int MaskPerStep = 07; // Each step is encoded into 3bits, static constexpr int NumBitsPerStep = 3; static constexpr int MaskPerWarp = 07777; // Each warp has 4 steps(12 bits) static constexpr int NumBitsPerWarp = 12; // Number of warp_group_tiles static_assert(size<0>(SmemLayoutB{}) % WarpgroupTileSize == 0, "Copy size must evenly divide SMEM tile."); static constexpr int WarpgroupTileNum = size<0>(SmemLayoutB{}) / WarpgroupTileSize; static_assert(size<2>(typename TiledMma::AtomShape_MNK{}) <= WarpThreadShapeK, "Need to be able to transpose first k-block in the first step"); constexpr CUTLASS_HOST_DEVICE AsyncTranspositionOperandB( int warp_idx_, int warp_group_thread_idx_, TiledMma, SmemLayoutB, SmemLayoutAtomB, ElementB) : warp_idx(warp_idx_) , warp_group_thread_idx(warp_group_thread_idx_) , warp_idx_in_warp_group(warp_idx_ % NumWarpsPerWarpGroup) , current_warp_tile_n_coord_LUT((WarpTileNCoordLUT >> ((warp_idx_ % NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) , current_warp_tile_k_coord_LUT((WarpTileKCoordLUT >> ((warp_idx_ % NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) { } template < class TensorSmemB, class TensorTransposedSmemB> CUTLASS_DEVICE void operator()( TensorSmemB const& sB, TensorTransposedSmemB const& gmma_sB, int read_stage, int current_step) { if (current_step >= StepsPerWarpGroup) { return; } static constexpr auto WarpThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<WarpThreadShapeK>{})); ////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// A warp group uses 2 steps to transpose the whole WarpgroupTileSize x WarpgroupTileSize. /// In each step, one warp would hold two warp_tiles. 
/// Step 0: Step 1: /// W0 W1 W2 W3 -- -- -- -- /// W1 W0 -- -- -- -- W3 W2 /// W2 -- -- -- -- W3 W0 W1 /// W3 -- -- -- -- W2 W1 W0 /// ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /// /// Fully static coord LUT to avoid extra register use. /// [warp_id][step][warp_tile][n / k] /// Step 0 Step 1 Step 2 Step 3 Step 4 Step 5 Step 6 Step 7 /// {{{0,0}, {1,1}}, {{2,2}, {3,3}}, {{4,4}, {5,5}}, {{6,6}, {7,7}}, {{4,0}, {0,4}}, {{4,1}, {1,4}}, {{4,2}, {2,4}}, {{4,3}, {3,4}}}, // W0 /// {{{1,0}, {0,1}}, {{3,2}, {2,3}}, {{5,4}, {4,5}}, {{7,6}, {6,7}}, {{5,0}, {0,5}}, {{5,1}, {1,5}}, {{5,2}, {2,5}}, {{5,3}, {3,5}}}, // W1 /// {{{2,0}, {0,2}}, {{3,1}, {1,3}}, {{6,4}, {4,6}}, {{7,5}, {5,7}}, {{6,0}, {0,6}}, {{6,1}, {1,6}}, {{6,2}, {2,6}}, {{6,3}, {3,6}}}, // W2 /// {{{3,0}, {0,3}}, {{2,1}, {1,2}}, {{7,4}, {4,7}}, {{6,5}, {5,6}}, {{7,0}, {0,7}}, {{7,1}, {1,7}}, {{7,2}, {2,7}}, {{7,3}, {3,7}}}, // W3 /// /// Encoding the coord of warp tile0 into two int64_t values. /// Only encoding Step 0 ~ Step 4, since Step 5 ~ Step 7 have a straightforward pattern. /// Only encoding warp tile0, since the coords of warp tile1 could be easily deduced from warp tile0. /// The 2-step transposition and the 8-step transposition share the same encoding. /// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Divide entire SMEM to multiple warp_tiles constexpr auto WarpTileShape = make_shape(Int<WarpTileSize>(), Int<WarpTileSize>()); Tensor s_tile = zipped_divide( sB(_,_,read_stage), WarpTileShape); Tensor s_tile_transposed = zipped_divide(gmma_sB(_,_,read_stage), WarpTileShape); // Get copy tile auto sB_tiled_copy = make_tiled_copy( Copy_Atom<DefaultCopy, ElementB>{}, WarpThreadLayout, // thr_layout Layout<_1>{} // val_layout ); static_assert(size(sB_tiled_copy) * NumWarpsPerWarpGroup == size(TiledMma{}) / NumMathWarpGroup, "Wrong thread number in TiledCopy."); auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx % NumThreadsPerWarp); // slice based on lane_idx // Construct fragments for transposition Tensor tmp_tCsB = sB_thr_copy.partition_S(flatten(s_tile(_, make_coord(_0{}, _0{})))); decltype(make_fragment_like(tmp_tCsB)) transpose_fragments[TilesPerWarp] = { make_fragment_like(tmp_tCsB), make_fragment_like(tmp_tCsB) }; [[maybe_unused]] int step = current_step * NumMathWarpGroup; if constexpr (NumMathWarpGroup == 2) { // For 2 math warpgroup, warp idx4~7 is 1st warp group and 8~9 is 2nd, so decide if 2nd warpgroup need warp idx divide 8. step += warp_idx / (NumWarpsPerWarpGroup * 2); } int tmp_warp_tile_n_coord_LUT = current_warp_tile_n_coord_LUT >> (NumBitsPerStep * current_step); int tmp_warp_tile_k_coord_LUT = current_warp_tile_k_coord_LUT >> (NumBitsPerStep * current_step); if constexpr (NumMathWarpGroup == 2) { tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2)); tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2)); } // decoding the warp tile coord. int warp_tile0_n, warp_tile0_k; if constexpr (StepsPerWarpGroup <= NumStepsEncoded) { warp_tile0_n = tmp_warp_tile_n_coord_LUT & MaskPerStep; warp_tile0_k = tmp_warp_tile_k_coord_LUT & MaskPerStep; } else { warp_tile0_n = step < NumStepsEncoded ? (tmp_warp_tile_n_coord_LUT & MaskPerStep) : 4 + warp_idx_in_warp_group; warp_tile0_k = step < NumStepsEncoded ? 
(tmp_warp_tile_k_coord_LUT & MaskPerStep) : step - 4; } int warp_tile1_n = warp_tile0_n == warp_tile0_k ? warp_tile0_n + 1 : warp_tile0_k; int warp_tile1_k = warp_tile0_n == warp_tile0_k ? warp_tile0_k + 1 : warp_tile0_n; CUTLASS_PRAGMA_UNROLL for (int warp_group_tile = 0; warp_group_tile < WarpgroupTileNum; ++warp_group_tile) { static_assert(TilesPerWarp == 2); // [warp_tile][n/k] const int warp_tile_coord[TilesPerWarp][2] = { // n k {warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile0_n, warp_tile0_k}, // warp_tile 0 {warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile1_n, warp_tile1_k} // warp_tile 1 }; CUTLASS_PRAGMA_UNROLL for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) { Tensor tCsB = sB_thr_copy.partition_S( flatten(s_tile(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1]))) ); // (CPY, CPY_N, CPY_K) copy(sB_tiled_copy, tCsB, transpose_fragments[warp_tile]); } // Make sure elements in two 8x8 warp tiles are all consumed __syncwarp(); CUTLASS_PRAGMA_UNROLL for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) { Tensor tCsB_transposed = sB_thr_copy.partition_D( flatten(s_tile_transposed(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1]))) ); // (CPY, CPY_N, CPY_K) copy(sB_tiled_copy, transpose_fragments[warp_tile], tCsB_transposed); } } // loop warp_group_tile } CUTLASS_DEVICE void synchronize(int step) { if (step < StepsPerWarpGroup) { // SMEM fence to make sure B is transposed before math cutlass::arch::fence_view_async_shared(); cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier); } } CUTLASS_DEVICE void synchronize() { cutlass::arch::fence_view_async_shared(); cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier); } template < class TensorSmemB, class TensorTransposedSmemB> CUTLASS_DEVICE void transpose( TensorSmemB const& sB, TensorTransposedSmemB const& gmma_sB, int read_stage) { CUTLASS_PRAGMA_UNROLL for(int i = 0; i < StepsPerWarpGroup; ++i) { this->operator()(sB, gmma_sB, read_stage, i); } synchronize(); } private: const int warp_idx; const int warp_group_thread_idx; const int warp_idx_in_warp_group; const int current_warp_tile_n_coord_LUT; const int current_warp_tile_k_coord_LUT; }; template< class TiledMma_, class SmemLayoutB_, class SmemLayoutAtomB_, class ElementB_> class AsyncTranspositionOperandB_1BElementB { public: static_assert(sizeof(ElementB_) == 1); using TiledMma = TiledMma_; using SmemLayoutB = SmemLayoutB_; using SmemLayoutAtomB = SmemLayoutAtomB_; using ElementB = ElementB_; static constexpr int Steps = 8; static constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup; static constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup; static_assert(NumMathWarpGroup <= 2, "Wrong math warp group number for TransposeB"); static constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K. static constexpr int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp; static constexpr int BytesPerSmemSwizzleUnit = 16; static constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB); static constexpr int WarpThreadShapeK = NumThreadsPerWarp / WarpThreadShapeN; static constexpr int NumWarpTilePerWarpgroupTile = NumWarpsPerWarpGroup * (Steps == 8 ? 
2 : 1); static constexpr int WarpTileSize = WarpgroupTileSize / NumWarpTilePerWarpgroupTile; static_assert(WarpTileSize >= WarpThreadShapeN && WarpTileSize >= WarpThreadShapeK, "Invaild warp thread shape." ); static constexpr int TilesPerWarp = 2; // Each Warp would process 2 warp_tiles in one step. static constexpr int64_t WarpTileNCoordLUT = 06723763275316420; static constexpr int64_t WarpTileKCoordLUT = 05410541064206420; static constexpr int NumStepsEncoded = 4; // Only encoding first 4 steps into LUT. static constexpr int MaskPerStep = 07; // Each step is encoded into 3bits, static constexpr int NumBitsPerStep = 3; static constexpr int MaskPerWarp = 07777; // Each warp has 4 steps(12 bits) static constexpr int NumBitsPerWarp = 12; // Number of warp_group_tiles static_assert(size<0>(SmemLayoutB{}) % WarpgroupTileSize == 0, "Copy size must evenly divide SMEM tile."); static constexpr int WarpgroupTileNum = size<0>(SmemLayoutB{}) / WarpgroupTileSize; constexpr CUTLASS_HOST_DEVICE AsyncTranspositionOperandB_1BElementB( int warp_idx_, int warp_group_thread_idx_, TiledMma, SmemLayoutB, SmemLayoutAtomB, ElementB) : warp_idx(warp_idx_) , warp_group_thread_idx(warp_group_thread_idx_) , warp_idx_in_warp_group(warp_idx_ % NumWarpsPerWarpGroup) , current_warp_tile_n_coord_LUT((WarpTileNCoordLUT >> ((warp_idx_ % NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) , current_warp_tile_k_coord_LUT((WarpTileKCoordLUT >> ((warp_idx_ % NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) { } template < class TensorSmemB, class TensorTransposedSmemB> CUTLASS_DEVICE void operator()( TensorSmemB const& sB, TensorTransposedSmemB const& gmma_sB, int read_stage, int current_step) { if (current_step > 0) { return; } constexpr auto WarpThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<WarpThreadShapeK>{})); ////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// A warp group uses 8 steps to transpose the whole WarpgroupTileSize x WarpgroupTileSize. /// Divide a warp_group_tile into 8x8 warp_tiles to further reduce the reg usage. 
/// Step 0: Step 1: Step 2: Step 3: /// W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- /// W1 W0 -- -- -- -- -- -- -- -- W3 W2 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- /// W2 -- -- -- -- -- -- -- -- W3 W0 W1 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- /// W3 -- -- -- -- -- -- -- -- W2 W1 W0 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W1 W0 -- -- -- -- -- -- -- -- W3 W2 /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W3 W0 W1 /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W2 W1 W0 /// /// Step 4: Step 5: Step 6: Step 7: /// -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- /// -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 /// W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- /// W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- /// W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- /// W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- /// ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /// /// Fully static coord LUT to avoid extra register use. /// [warp_id][step][warp_tile][n / k] /// Step 0 Step 1 Step 2 Step 3 Step 4 Step 5 Step 6 Step 7 /// {{{0,0}, {1,1}}, {{2,2}, {3,3}}, {{4,4}, {5,5}}, {{6,6}, {7,7}}, {{4,0}, {0,4}}, {{4,1}, {1,4}}, {{4,2}, {2,4}}, {{4,3}, {3,4}}}, // W0 /// {{{1,0}, {0,1}}, {{3,2}, {2,3}}, {{5,4}, {4,5}}, {{7,6}, {6,7}}, {{5,0}, {0,5}}, {{5,1}, {1,5}}, {{5,2}, {2,5}}, {{5,3}, {3,5}}}, // W1 /// {{{2,0}, {0,2}}, {{3,1}, {1,3}}, {{6,4}, {4,6}}, {{7,5}, {5,7}}, {{6,0}, {0,6}}, {{6,1}, {1,6}}, {{6,2}, {2,6}}, {{6,3}, {3,6}}}, // W2 /// {{{3,0}, {0,3}}, {{2,1}, {1,2}}, {{7,4}, {4,7}}, {{6,5}, {5,6}}, {{7,0}, {0,7}}, {{7,1}, {1,7}}, {{7,2}, {2,7}}, {{7,3}, {3,7}}}, // W3 /// /// Encoding the coord of warp tile0 into two int64_t values. /// Only encoding Step 0 ~ Step 4, since Step 5 ~ Step 7 have a straightforward pattern. /// Only encoding warp tile0, since the coords of warp tile1 could be easily deduced from warp tile0. /// The 2-step transposition and the 8-step transposition share the same encoding. 
/// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Divide entire SMEM to multiple warp_tiles constexpr auto WarpTileShape = make_shape(Int<WarpTileSize>(), Int<WarpTileSize>()); Tensor s_tile = zipped_divide( sB(_,_,read_stage), WarpTileShape); Tensor s_tile_transposed = zipped_divide(gmma_sB(_,_,read_stage), WarpTileShape); // Get copy tile auto sB_tiled_copy = make_tiled_copy( Copy_Atom<DefaultCopy, ElementB>{}, WarpThreadLayout, // thr_layout Layout<_1>{} // val_layout ); static_assert(size(sB_tiled_copy) * NumWarpsPerWarpGroup == size(TiledMma{}) / NumMathWarpGroup, "Wrong thread number in TiledCopy."); auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx % NumThreadsPerWarp); // slice based on lane_idx // Construct fragments for transposition Tensor tmp_tCsB = sB_thr_copy.partition_S(flatten(s_tile(_, make_coord(_0{}, _0{})))); decltype(make_fragment_like(tmp_tCsB)) transpose_fragments[TilesPerWarp] = { make_fragment_like(tmp_tCsB), make_fragment_like(tmp_tCsB) }; CUTLASS_PRAGMA_NO_UNROLL for (int warp_group_tile = 0; warp_group_tile < WarpgroupTileNum; ++warp_group_tile) { int tmp_warp_tile_n_coord_LUT = current_warp_tile_n_coord_LUT; int tmp_warp_tile_k_coord_LUT = current_warp_tile_k_coord_LUT; constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup; if constexpr (NumMathWarpGroup == 2) { tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2)); tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2)); } CUTLASS_PRAGMA_NO_UNROLL for (int step_per_warp_group = 0; step_per_warp_group < StepsPerWarpGroup; ++step_per_warp_group) { // For 2 math warpgroup, warp idx4~7 is 1st warp group and 8~9 is 2nd, so decide if 2nd warpgroup need warp idx divide 8. int step = step_per_warp_group * NumMathWarpGroup + warp_idx / (NumWarpsPerWarpGroup * 2); // decoding the warp tile coord. int warp_tile0_n = step < NumStepsEncoded ? (tmp_warp_tile_n_coord_LUT & MaskPerStep) : 4 + warp_idx_in_warp_group; int warp_tile0_k = step < NumStepsEncoded ? (tmp_warp_tile_k_coord_LUT & MaskPerStep) : step - 4; int warp_tile1_n = warp_tile0_n == warp_tile0_k ? warp_tile0_n + 1 : warp_tile0_k; int warp_tile1_k = warp_tile0_n == warp_tile0_k ? 
warp_tile0_k + 1 : warp_tile0_n; tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep; tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep; static_assert(TilesPerWarp == 2); // [warp_tile][n/k] const int warp_tile_coord[TilesPerWarp][2] = { // n k {warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile0_n, warp_tile0_k}, // warp_tile 0 {warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile1_n, warp_tile1_k} // warp_tile 1 }; CUTLASS_PRAGMA_UNROLL for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) { Tensor tCsB = sB_thr_copy.partition_S( flatten(s_tile(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1]))) ); // (CPY, CPY_N, CPY_K) copy(sB_tiled_copy, tCsB, transpose_fragments[warp_tile]); } // Make sure elements in two 8x8 warp tiles are all consumed __syncwarp(); CUTLASS_PRAGMA_UNROLL for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) { Tensor tCsB_transposed = sB_thr_copy.partition_D( flatten(s_tile_transposed(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1]))) ); // (CPY, CPY_N, CPY_K) copy(sB_tiled_copy, transpose_fragments[warp_tile], tCsB_transposed); } } // lock step } // loop warp_group_tile } CUTLASS_DEVICE void synchronize(int step) { if (step == 0) { // SMEM fence to make sure B is transposed before math cutlass::arch::fence_view_async_shared(); cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier); } } CUTLASS_DEVICE void synchronize() { cutlass::arch::fence_view_async_shared(); cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier); } template < class TensorSmemB, class TensorTransposedSmemB> CUTLASS_DEVICE void transpose( TensorSmemB const& sB, TensorTransposedSmemB const& gmma_sB, int read_stage) { this->operator()(sB, gmma_sB, read_stage, 0); synchronize(); } private: const int warp_idx; const int warp_group_thread_idx; const int warp_idx_in_warp_group; const int current_warp_tile_n_coord_LUT; const int current_warp_tile_k_coord_LUT; }; template< class TiledMma, class SmemLayoutB, class SmemLayoutAtomB, class ElementB, bool TransposeB > constexpr CUTLASS_HOST_DEVICE auto make_transpose_operand_b( int warp_idx, int warp_group_thread_idx, TiledMma, SmemLayoutB, SmemLayoutAtomB, ElementB, cute::bool_constant<TransposeB>) { if constexpr (!TransposeB) { return NoTranspositionOperandB( warp_idx, warp_group_thread_idx, TiledMma{}, SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{}); } else if constexpr (use_universal_transposition<SmemLayoutAtomB, ElementB>()) { return UniversalTranspositionOperandB( warp_idx, warp_group_thread_idx, TiledMma{}, SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{}); } else if constexpr (sizeof(ElementB) == 1) { return AsyncTranspositionOperandB_1BElementB( warp_idx, warp_group_thread_idx, TiledMma{}, SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{}); } else { return AsyncTranspositionOperandB( warp_idx, warp_group_thread_idx, TiledMma{}, SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{}); } } }; // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace collective } // namespace transform } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
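// -----------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). It assumes the surrounding
// collective mainloop already defines TiledMma, SmemLayoutB, SmemLayoutAtomB and ElementB, and
// owns the two shared-memory views of operand B (the canonical MN-major view and the K-major
// view consumed by WGMMA); every name below is a placeholder for whatever the kernel provides.
template <
  class TiledMma, class SmemLayoutB, class SmemLayoutAtomB, class ElementB,
  class TensorSmemB, class TensorTransposedSmemB>
CUTLASS_DEVICE
void transpose_operand_b_example(
    int thread_idx,
    TensorSmemB const& sB,                 // MN-major SMEM tensor of B (source)
    TensorTransposedSmemB const& gmma_sB,  // K-major SMEM tensor of B (destination)
    int read_stage) {                      // pipeline stage currently being consumed

  int warp_idx              = thread_idx / cutlass::NumThreadsPerWarp;
  int warp_group_thread_idx = thread_idx % cutlass::NumThreadsPerWarpGroup;

  // Selects NoTranspositionOperandB, UniversalTranspositionOperandB,
  // AsyncTranspositionOperandB, or AsyncTranspositionOperandB_1BElementB
  // based on the SMEM layout atom and the element width.
  auto transpose = cutlass::transform::collective::detail::make_transpose_operand_b(
      warp_idx, warp_group_thread_idx,
      TiledMma{}, SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{},
      cute::bool_constant<true>{} /* TransposeB */);

  // One-shot form: runs every transposition step for this stage, then issues the SMEM fence
  // and named-barrier sync so the math warp groups observe the transposed tile before WGMMA.
  transpose.transpose(sB, gmma_sB, read_stage);

  // Step-wise form (alternative), which lets the caller interleave steps with MMAs:
  //   transpose(sB, gmma_sB, read_stage, /*current_step=*/0);
  //   transpose.synchronize(/*step=*/0);
}
// -----------------------------------------------------------------------------------------------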
cutlass/include/cutlass/transform/collective/sm90_wgmma_transpose.hpp/0
{ "file_path": "cutlass/include/cutlass/transform/collective/sm90_wgmma_transpose.hpp", "repo_id": "cutlass", "token_count": 14474 }
37
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing computing the addresses of loading small vectors from the global memory. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/matrix_coord.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedVectorAccessIterator /// template < /// Shape of the vector accessed by the entire threadblock typename Shape, /// Shape of the vector accessed by the warp typename WarpShape, /// Type of Element typename Element, /// Layout of the vector typename Layout, /// Number of elements for each access int ElementsPerAccess, /// Support residual tile bool EnableResidualAccess = false > class PredicatedVectorAccessIterator; //////////////////////////////////////////////////////////////////////////////// /// Vector access iterator specialized for vectors, e.g. 
scale and bias /// Thread arrangements are for TensorOps /// template < typename Shape_, typename WarpShape_, typename Element_, int ElementsPerAccess, bool EnableResidualAccess > class PredicatedVectorAccessIterator < Shape_, WarpShape_, Element_, layout::PitchLinear, ElementsPerAccess, EnableResidualAccess > { public: using Shape = Shape_; using WarpShape = WarpShape_; using Element = Element_; using Layout = layout::PitchLinear; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ConstPointer = const Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; // static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value; static int const kElementsPerAccess = ElementsPerAccess; static int const kThreads = 32; static int const kRowsPerIteration = 8; static int const kThreadsPerRow = kThreads / kRowsPerIteration; static int const kThreadsPerRowMask = 0x3; static int const kIterations = WarpShape::kContiguous / (kThreadsPerRow * kElementsPerAccess); static int const kWarpCountStrided = Shape::kStrided / WarpShape::kStrided; using AccessType = AlignedArray<Element, kElementsPerAccess>; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; private: // // Data members // /// Internal pointer to first access of tile BytePointer pointer_; /// Extent of tensor TensorCoord extent_; /// pointer offset of each thread TensorCoord thread_offset_; /// iteration index LongIndex iteration_; /// residual access bool is_residual_; /// residual offset of each thread TensorCoord residual_offset_; public: /// Constructs a vector access iterator CUTLASS_HOST_DEVICE PredicatedVectorAccessIterator( /// Pointer to the start of the vector ConstPointer pointer, /// Extent of vector TensorCoord extent, /// ID of each participating thread int thread_id, /// ID of each participating warp int warp_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) : pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), extent_(extent), is_residual_(false) { int warp_offset = (warp_id / kWarpCountStrided) * WarpShape::kContiguous; // Per-thread offset in logical coordinates of tensor thread_offset_ = threadblock_offset + TensorCoord(warp_offset, 0) + TensorCoord((thread_id & kThreadsPerRowMask) * kElementsPerAccess, 0); set_iteration_index(0); if(EnableResidualAccess) { // compute residual offset typename TensorCoord::Index residual_size = extent_.contiguous() % WarpShape::kContiguous; if (residual_size) { is_residual_ = true; residual_offset_ = make_Coord(residual_size, 0); } } } /// Construct a PredicatedVectorAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedVectorAccessIterator( /// Pointer to start of vector ConstPointer pointer, /// Extent of vector TensorCoord extent, ///< ID of each participating thread int thread_id, /// ID of each participating warp int warp_id) : PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_ = index; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE void add_tile_offset( TensorCoord const &tile_offset) { thread_offset_ = thread_offset_ + TensorCoord(WarpShape::kContiguous * 
tile_offset.contiguous(), 0); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>( pointer_ + ((thread_offset_.contiguous() + iteration_ * kThreadsPerRow * kElementsPerAccess) * sizeof_bits<Element>::value / 8)); } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedVectorAccessIterator &operator++() { ++iteration_; if(iteration_ >= kIterations) iteration_ = 0; return *this; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE void advance() { if(EnableResidualAccess && is_residual_) { is_residual_ = false; thread_offset_ += residual_offset_; } else add_tile_offset(TensorCoord(1, 0)); } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedVectorAccessIterator operator++(int) { PredicatedVectorAccessIterator self(*this); operator++(); return self; } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return ((thread_offset_.contiguous() + iteration_ * kThreadsPerRow * kElementsPerAccess) < extent_.contiguous()); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedVectorAccessIterator for row-major data. /// template < typename Shape_, typename WarpShape_, typename Element_, int ElementsPerAccess, bool EnableResidualAccess > class PredicatedVectorAccessIterator< Shape_, WarpShape_, Element_, layout::RowMajor, ElementsPerAccess, EnableResidualAccess > { public: using Shape = Shape_; using WarpShape = WarpShape_; using Element = Element_; using Layout = layout::RowMajor; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ConstPointer = const Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedVectorAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>, Element, layout::PitchLinear, ElementsPerAccess, EnableResidualAccess>; using AccessType = typename UnderlyingIterator::AccessType; static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess; static int const kRowsPerIteration = UnderlyingIterator::kRowsPerIteration; static int const kThreads = UnderlyingIterator::kThreads; static int const kIterations = UnderlyingIterator::kIterations; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedVectorAccessIterator( ///< Pointer to the start of the vector ConstPointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< ID of each participating warp int warp_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, warp_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedVectorAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedVectorAccessIterator( ConstPointer pointer, ///< Pointer to the start of the vector TensorCoord extent, ///< Extent of tensor int thread_id, ///< ID of each participating thread int warp_id ///< ID of 
each participating warp ) : PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedVectorAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedVectorAccessIterator operator++(int) { PredicatedVectorAccessIterator self(*this); operator++(); return self; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE void advance() { iterator_.advance(); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass
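// -----------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). The tile shapes and element type
// below are example values only; a real kernel would take them from its threadblock/warp
// configuration, and the vector would typically hold a per-channel scale or bias.
using ExampleVectorIterator = cutlass::transform::threadblock::PredicatedVectorAccessIterator<
    cutlass::MatrixShape<128, 128>,   // threadblock-scope vector shape (assumed)
    cutlass::MatrixShape<32, 128>,    // warp-scope vector shape (assumed)
    cutlass::half_t,                  // element type of the vector
    cutlass::layout::RowMajor,
    8                                 // elements per 128-bit access
>;

CUTLASS_DEVICE
void load_vector_example(
    cutlass::half_t const* ptr_vector,
    cutlass::MatrixCoord extent,
    int thread_idx,
    int warp_idx,
    cutlass::MatrixCoord tb_offset) {

  ExampleVectorIterator iter(ptr_vector, extent, thread_idx, warp_idx, tb_offset);

  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < ExampleVectorIterator::kIterations; ++i) {
    typename ExampleVectorIterator::AccessType frag;
    frag.clear();
    if (iter.valid()) {
      frag = *iter.get();   // one predicated, vectorized load
    }
    ++iter;                 // next access within the current tile
    // ... consume frag (e.g., apply scale/bias to accumulator fragments) ...
  }

  iter.advance();           // advance to the next tile (or the residual tile)
}
// -----------------------------------------------------------------------------------------------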
cutlass/include/cutlass/transform/threadblock/predicated_vector_access_iterator.h/0
{ "file_path": "cutlass/include/cutlass/transform/threadblock/predicated_vector_access_iterator.h", "repo_id": "cutlass", "token_count": 4187 }
38
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. */ #pragma once #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/functional.h" namespace cutlass { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Wmma array type (WmmaFragmentArray holds elements of type nvcuda::wmma::fragment) template < /// Element type typename T, /// Number of elements in the array int N, /// Whether the element type of T is half_t or __half bool IsHalfType = (platform::is_same<typename T::element_type, cutlass::half_t>::value || platform::is_same<typename T::element_type, __half>::value) > class WmmaFragmentArray: public Array<T, N, true> { public: /// Efficient clear method (override Array::clear()) CUTLASS_HOST_DEVICE void clear() { for(int i = 0; i < Array<T, N, true>::kElements; i++) { nvcuda::wmma::fill_fragment((*this)[i], (typename T::element_type)0); } } CUTLASS_HOST_DEVICE WmmaFragmentArray<T, N>& operator+=(const WmmaFragmentArray<T, N>& rhs) { using element_type = typename T::element_type; plus<T> add; for (int i = 0; i < Array<T, N, true>::kElements; i++) { (*this)[i] = add((*this)[i], rhs[i]); } return *this; } }; /// Partial specialization for the case in which T::element_type is /// half_t or __half. This is needed because the cast (typename T::element_type)0 /// in the primary template flags as an error when __CUDA_NO_HALF_CONVERSIONS__ /// is set. 
template < /// Element type typename T, /// Number of elements in the array int N > class WmmaFragmentArray<T, N, true>: public Array<T, N, true> { public: /// Efficient clear method (override Array::clear()) CUTLASS_HOST_DEVICE void clear() { for(int i = 0; i < Array<T, N, true>::kElements; i++) { nvcuda::wmma::fill_fragment((*this)[i], __float2half(0.f)); } } CUTLASS_HOST_DEVICE WmmaFragmentArray<T, N>& operator+=(const WmmaFragmentArray<T, N>& rhs) { using element_type = typename T::element_type; plus<T> add; for (int i = 0; i < Array<T, N, true>::kElements; i++) { (*this)[i] = add((*this)[i], rhs[i]); } return *this; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// #endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
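// -----------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). The 16x16x16 half-precision
// accumulator fragment is just an example configuration; any WMMA fragment type works.
#if defined(CUTLASS_ARCH_WMMA_ENABLED)

using ExampleAccumFragment =
    nvcuda::wmma::fragment<nvcuda::wmma::accumulator, 16, 16, 16, __half>;

// Two fragments stored contiguously in a CUTLASS Array-style container.
using ExampleAccumArray = cutlass::WmmaFragmentArray<ExampleAccumFragment, 2>;

CUTLASS_DEVICE
void accumulate_example(ExampleAccumArray const& partial) {
  ExampleAccumArray accum;
  accum.clear();      // fills every fragment with zero via nvcuda::wmma::fill_fragment
  accum += partial;   // element-wise fragment addition provided by WmmaFragmentArray
}

#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
// -----------------------------------------------------------------------------------------------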
cutlass/include/cutlass/wmma_array.h/0
{ "file_path": "cutlass/include/cutlass/wmma_array.h", "repo_id": "cutlass", "token_count": 1451 }
39
# CUTLASS 3.0 _CUTLASS 3.0 - January 2023_ CUTLASS is a collection of CUDA C++ template abstractions for implementing high-performance matrix-multiplication (GEMM) at all levels and scales within CUDA. It incorporates strategies for hierarchical decomposition and data movement similar to those used to implement cuBLAS. CUTLASS decomposes these "moving parts" into reusable, modular software components abstracted by C++ template classes. These components can be specialized and tuned via custom tiling sizes, data types, and other algorithmic policies. The resulting flexibility simplifies their use as building blocks within custom kernels and applications. To support a wide variety of applications, CUTLASS provides extensive support for mixed-precision computations, providing specialized data-movement and multiply-accumulate abstractions for 8-bit integer, half-precision floating point (FP16), single-precision floating point (FP32), and double-precision floating point (FP64) types. Furthermore, CUTLASS exploits the _Tensor Cores_ and asynchronous memory copy operations of the latest NVIDIA GPU architectures. # What's New in CUTLASS 3.0 For an overview of CUTLASS 3.0's GEMM interface levels, please refer to the [CUTLASS 3.0 GEMM API document](./gemm_api_3x.md). To learn how to migrate code using CUTLASS 2.x's interface to CUTLASS 3.0, please refer to the [backwards compatibility document](./cutlass_3x_backwards_compatibility.md). # GEMM examples For a code example showing how to define a GEMM kernel using CUTLASS, please refer to [the quickstart guide](./quickstart.md). The [`examples` directory](../../examples) has a variety of examples. # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
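# A minimal GEMM example

Complementing the quickstart and `examples` references above, the sketch below uses the 2.x-style `cutlass::gemm::device::Gemm` convenience wrapper, which remains available alongside the CUTLASS 3.0 interfaces. The data types, layouts, and default kernel selection here are illustrative choices, not a recommendation.

```cpp
#include "cutlass/gemm/device/gemm.h"

// Single-precision GEMM with column-major A, B, and C.
using ExampleGemm = cutlass::gemm::device::Gemm<
    float, cutlass::layout::ColumnMajor,   // A
    float, cutlass::layout::ColumnMajor,   // B
    float, cutlass::layout::ColumnMajor>;  // C

cutlass::Status run_sgemm(
    int M, int N, int K,
    float alpha, float const *A, int lda,
    float const *B, int ldb,
    float beta, float *C, int ldc) {

  ExampleGemm gemm_op;

  ExampleGemm::Arguments args(
      {M, N, K},      // problem size
      {A, lda},       // TensorRef to A
      {B, ldb},       // TensorRef to B
      {C, ldc},       // TensorRef to C (source)
      {C, ldc},       // TensorRef to D (destination)
      {alpha, beta}); // epilogue: D = alpha * (A * B) + beta * C

  return gemm_op(args);  // launches the kernel on the default stream
}
```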
cutlass/media/docs/doxygen_mainpage.md/0
{ "file_path": "cutlass/media/docs/doxygen_mainpage.md", "repo_id": "cutlass", "token_count": 898 }
40
[build-system] requires = ["setuptools"] build-backend = "setuptools.build_meta" [project] name = "nvidia-cutlass" version = "3.5.0.0" description = "CUTLASS" readme = "README.md" requires-python = ">=3.8" license = {text = "BSD-3-Clause"} classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", ] dependencies = [ "cuda-python>=11.8.0", "networkx", "numpy", "pydot", "scipy", "treelib" ] [project.urls] "Homepage" = "https://github.com/nvidia/cutlass" "Bug Tracker" = "https://github.com/nvidia/cutlass/issues"
cutlass/pyproject.toml/0
{ "file_path": "cutlass/pyproject.toml", "repo_id": "cutlass", "token_count": 252 }
41
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# from pycute import product from cutlass_library import DataTypeSize, DataTypeTag from cutlass.backend.evt.ir import ( # Load Node AccumulatorImpl, AuxLoadImpl, ColumnBroadcastImpl, LoadNode, LoadSrcImpl, RowBroadcastImpl, ScalarBroadcastImpl, # Compute Node ComputeImpl, ComputeNode, # Store Node AuxStoreImpl, ColumnReductionImpl, RowReductionImpl, ScalarReductionImpl, StoreNode, StoreDImpl, ) from cutlass.backend.library import ( FloatRoundStyleTag, FunctionalOp, op_tag, ) class Sm90AccumulatorImpl(AccumulatorImpl): @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = f"""\nusing {self.name_camel} = cutlass::epilogue::fusion::Sm90AccFetch;\n""" return self._type_decl class Sm90LoadSrcImpl(LoadSrcImpl): @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = f""" using ElementC = {DataTypeTag[self.element]}; using StrideC = {self.stride_mnl}; using {self.name_camel} = cutlass::epilogue::fusion::Sm90SrcFetch<{DataTypeTag[self.element]}>; """ return self._type_decl class Sm90AuxLoadImpl(AuxLoadImpl): @property def descriptor(self) -> str: """ Descriptor for Aux Load """ return f"{self.name_camel}Descriptor" def decl_descriptor(self) -> str: """ Declare the descriptor type """ return f"\nusing {self.descriptor} = cutlass::epilogue::collective::detail::AuxLoadDescriptor<EpilogueDescriptor, {self.stride_mnl}, {DataTypeTag[self.element]}>;\n" @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = self.decl_descriptor() self._type_decl += f""" using 
{self.name_camel} = cutlass::epilogue::fusion::Sm90AuxLoad< {self.descriptor}::Stages, typename {self.descriptor}::EpilogueTile, {DataTypeTag[self.element]}, {self.stride_mnl}, typename {self.descriptor}::SmemLayoutAtom, typename {self.descriptor}::CopyOpS2R >; """ return self._type_decl def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles): """ Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d """ return (DataTypeSize[self.element] * stages_c * product(epilogue_tile_mn) // 8, 128) class Sm90ScalarBroadcastImpl(ScalarBroadcastImpl): def __init__(self, node: LoadNode) -> None: super().__init__(node) self.broadcast_count = 1 self.reduction_fn = FunctionalOp.Multiplies @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = f""" using {self.name_camel} = cutlass::epilogue::fusion::Sm90ScalarBroadcast< {DataTypeTag[self.element]}, {self.stride_mnl}, {self.broadcast_count}, {op_tag(self.reduction_fn)} >; """ return self._type_decl class Sm90RowBroadcastImpl(RowBroadcastImpl): @property def descriptor(self) -> str: """ Descriptor for Aux Load """ return f"{self.name_camel}Descriptor" def decl_descriptor(self) -> str: """ Declare the descriptor type """ return f"\nusing {self.descriptor} = cutlass::epilogue::collective::detail::RowBroadcastDescriptor<EpilogueDescriptor, {DataTypeTag[self.element]}>;\n" @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = self.decl_descriptor() self._type_decl += f""" using {self.name_camel} = cutlass::epilogue::fusion::Sm90RowBroadcast< {self.descriptor}::Stages, typename EpilogueDescriptor::TileShape, typename {self.descriptor}::Element, {self.stride_mnl} >; """ return self._type_decl def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles): """ Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d """ stages = (stages_c + epi_tiles - 1) // epi_tiles + 1 return (DataTypeSize[self.element] * cta_tile_mnk[1] * stages // 8, 16) class Sm90ColumnBroadcastImpl(ColumnBroadcastImpl): @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = f""" using {self.name_camel} = cutlass::epilogue::fusion::Sm90ColBroadcast< 0 /*Stages*/, typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]}, {self.stride_mnl} >; """ return self._type_decl class Sm90ComputeImpl(ComputeImpl): @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = f""" using {self.name_camel} = cutlass::epilogue::fusion::Sm90Compute< {op_tag(self.fn)}, {DataTypeTag[self.element_output]}, {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]} >; """ return self._type_decl class Sm90AuxStoreImpl(AuxStoreImpl): @property def descriptor(self) -> str: """ Descriptor for Aux Load """ return f"{self.name_camel}Descriptor" def decl_descriptor(self) -> str: """ Declare the descriptor type """ return f""" using {self.descriptor} = cutlass::epilogue::collective::detail::AuxStoreDescriptor< EpilogueDescriptor, {self.stride_mnl}, {DataTypeTag[self.element]} >; """ @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = 
self.decl_descriptor() self._type_decl += f""" using {self.name_camel} = cutlass::epilogue::fusion::Sm90AuxStore< {self.descriptor}::Stages, typename {self.descriptor}::EpilogueTile, {DataTypeTag[self.element]}, {FloatRoundStyleTag[self.round_style]}, {self.stride_mnl}, typename {self.descriptor}::SmemLayoutAtom, typename {self.descriptor}::CopyOpR2S >; """ return self._type_decl def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles): """ Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d """ return (DataTypeSize[self.element] * stages_d * product(epilogue_tile_mn) // 8, 128) class Sm90StoreDImpl(StoreDImpl): @property def type_decl(self): """ Return the string defining the type """ return f""" using ElementD = {DataTypeTag[self.element]}; using StrideD = {self.stride_mnl}; """ class Sm90ColumnReductionImpl(ColumnReductionImpl): @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = f""" using {self.name_camel} = cutlass::epilogue::fusion::Sm90ColReduction< {op_tag(self.reg_reduce_fn)}, {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, 0, typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]}, {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]}, {self.stride_mnl} >; """ return self._type_decl class Sm90RowReductionImpl(RowReductionImpl): @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = f""" using {self.name_camel} = cutlass::epilogue::fusion::Sm90RowReduction< {op_tag(self.reg_reduce_fn)}, {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, 0 /* Stages */, typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]}, {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]}, {self.stride_mnl} >; """ return self._type_decl class Sm90ScalarReductionImpl(ScalarReductionImpl): @property def type_decl(self): """ Return the string defining the type """ if self._type_decl is not None: return self._type_decl self._type_decl = f""" using {self.name_camel} = cutlass::epilogue::fusion::Sm90ScalarReduction< {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, {DataTypeTag[self.element]}, {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]}, {self.stride_mnl} >; """ return self._type_decl
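# -----------------------------------------------------------------------------------------------
# For reference (not part of the original module): each *Impl class above emits a C++ type
# declaration that is spliced into the generated SM90 epilogue. For example, a Sm90ComputeImpl
# node named "compute_0" performing a multiply with half_t output and float compute would
# produce roughly the following (node name and types are illustrative):
#
#   using Compute0 = cutlass::epilogue::fusion::Sm90Compute<
#       cutlass::multiplies,
#       cutlass::half_t,
#       float,
#       cutlass::FloatRoundStyle::round_to_nearest
#   >;
#
# -----------------------------------------------------------------------------------------------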
cutlass/python/cutlass/backend/evt/backend/sm90_nodes.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/backend/sm90_nodes.py", "repo_id": "cutlass", "token_count": 4460 }
42
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Construct the epilogue visitor argument type """ from cutlass.backend.c_types import visitor_factory from cutlass.backend.evt.ir import TopoVisitorNode from cutlass.backend.evt.passes.pass_dag_2_tree import PassDAG2Tree from cutlass.backend.evt.passes.pass_get_impl import PassGetImpl from cutlass.backend.evt.passes.pass_manager import EVTPassBase from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation class PassGetArgumentType(EVTPassBase): """ Construct the epilogue visitor argument type """ dependencies = [ PassShapeTypePropagation, # The Layout of all nodes must be set PassDAG2Tree, # The type of each node must be set PassGetImpl # The DAG subgraphs must be set ] def requires(self) -> None: # Check "D" is in the node list if self.cc == 90 and (not self.dag_ir.has_node("D")): raise SyntaxError( "Sm90 EVT requires the epilogue to have a returned tensor D, " "but the variable 'D' is not found in the return values.") def call(self): nodes = self.dag_ir.nodes_topological_order() self.argument_types = {} for node in nodes: meta = self.dag_ir.get_node_meta(node) if not meta.disabled: self.argument_types[node] = meta.underlying_impl.argument_type if node == "D" and self.cc == 90: continue if isinstance(meta, TopoVisitorNode): self.get_dag_argument_type(node) else: self.get_evt_argument_type(node) self.cc_specific_method(self.set_argument_type)() def get_evt_argument_type(self, node): # Sort the input nodes by edge weight input_types = [self.argument_types[child] for child in self.dag_ir.get_all_inputs(node)] if len(input_types) > 0: self.argument_types[node] = visitor_factory( input_types + [self.argument_types[node],], self.dag_ir.get_all_inputs(node) + [node,]) def get_dag_argument_type(self, 
node): meta = self.dag_ir.get_node_meta(node) subgraph = meta.subgraph subgraph_nodes = subgraph.nodes_topological_order() # Visit the unvisited nodes in subgraph for n in subgraph_nodes: m = subgraph.get_node_meta(n) if m.disabled: continue else: self.argument_types[n] = m.underlying_impl.argument_type input_types = [self.argument_types[child] for child in subgraph_nodes[:-1]] if len(input_types) > 0: self.argument_types[node] = visitor_factory(input_types, subgraph_nodes[:-1]) def set_argument_type(self): pass def sm90_set_argument_type(self): self.dag_ir.epilogue_thread_type = self.argument_types[self.dag_ir.get_all_inputs("D")[0]] # Get the tensorD argument type self.dag_ir.arg_d_type = self.dag_ir.get_node_meta("D").underlying_impl.argument_type_d # Get the tensorC argument type if self.dag_ir.has_node("C"): self.dag_ir.arg_c_type = self.dag_ir.get_node_meta("C").underlying_impl.argument_type_c else: self.dag_ir.arg_c_type = self.dag_ir.arg_d_type def sm80_set_argument_type(self): nodes = self.dag_ir.nodes_topological_order() self.dag_ir.epilogue_thread_type = self.argument_types[nodes[-1]]
cutlass/python/cutlass/backend/evt/passes/pass_argument_type.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/passes/pass_argument_type.py", "repo_id": "cutlass", "token_count": 2058 }
43
################################################################################ # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ import ctypes from typing import Union from cuda import cuda, cudart import numpy as np from cutlass_library import ( DataTypeNames, DataTypeSize, DataTypeTag, LayoutType, SubstituteTemplate ) import cutlass from cutlass.backend.c_types import MatrixCoord_, TensorRef2D_, get_reduction_params from cutlass.backend.frontend import NumpyFrontend, TorchFrontend from cutlass.backend.library import TensorDescription from cutlass.backend.memory_manager import DevicePtrWrapper from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration from cutlass.shape import MatrixCoord from cutlass.utils.datatypes import is_numpy_tensor, is_torch_tensor class ReductionOperation: pass class ReductionArguments: """ Arguments of reduction """ def __init__( self, operation: ReductionOperation, problem_size: "list[int]", partitions: int, workspace: cuda.CUdeviceptr, destination: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]", source: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]", **kwargs, ) -> None: # tensor_C can be interpreted as the bias with bias=True in keyword args if "bias" in kwargs.keys(): self.bias = kwargs["bias"] else: # by default, tensor_C is not bias self.bias = False if "stream" in kwargs.keys(): self.stream = kwargs["stream"] else: self.stream = cuda.CUstream(0) self.operation = operation self.ptr_workspace = workspace # number of split-k partitions self.partitions = partitions if is_numpy_tensor(destination): self.host_D = destination self.destination_buffer = NumpyFrontend.argument(destination, True) self.source_buffer = NumpyFrontend.argument(source, False) self.ptr_destination = cuda.CUdeviceptr(self.destination_buffer.ptr) self.ptr_source = cuda.CUdeviceptr(self.source_buffer.ptr) elif is_torch_tensor(destination): self.ptr_destination 
= TorchFrontend.argument(destination) self.ptr_source = TorchFrontend.argument(source) elif isinstance(destination, cuda.CUdeviceptr): self.ptr_destination = destination self.ptr_source = source else: raise TypeError("unknown Type") self.problem_size = MatrixCoord_(problem_size[0], problem_size[1]) self.partition_stride = ( problem_size[0] * problem_size[1] * DataTypeSize[operation.C.element] // 8 ) if "output_op" in kwargs.keys(): self.output_op = kwargs["output_op"] else: self.output_op = self.operation.epilogue_type(1.0, 0.0) self.get_arguments() @staticmethod def get_tensor_ref( extent: "tuple[int]", device_ptr: cuda.CUdeviceptr, layout: LayoutType, ): if layout == LayoutType.RowMajor: return TensorRef2D_(int(device_ptr), extent[1]) else: raise ValueError(f"Unknown layout type {layout}") def get_arguments(self): ref_workspace = ReductionArguments.get_tensor_ref( extent=[ self.problem_size.row, self.problem_size.column, ], device_ptr=self.ptr_workspace, layout=LayoutType.RowMajor, ) if self.bias: ref_source = ReductionArguments.get_tensor_ref( extent=[0, 0], device_ptr=self.ptr_source, layout=LayoutType.RowMajor, ) else: ref_source = ReductionArguments.get_tensor_ref( extent=[ self.problem_size.row, self.problem_size.column, ], device_ptr=self.ptr_source, layout=LayoutType.RowMajor, ) ref_destination = ReductionArguments.get_tensor_ref( extent=[ self.problem_size.row, self.problem_size.column, ], device_ptr=self.ptr_destination, layout=LayoutType.RowMajor, ) self.c_arguments = self.operation.argument_type( self.problem_size, self.partitions, self.partition_stride, ref_workspace, ref_destination, ref_source, self.output_op, ) params_ = self.operation.rt_module.get_args(ctypes.byref(self.c_arguments)) self.host_workspace = bytearray(params_.contents) def sync(self): (err,) = cudart.cudaDeviceSynchronize() if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError(f"CUDA Error {str(err)}") if hasattr(self, "host_D"): (err,) = cuda.cuMemcpyDtoH( self.host_D, self.ptr_destination, self.host_D.size * self.host_D.itemsize, ) if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError("CUDA Error %s" % str(err)) self.free() def free(self): """ Frees allocated device-side memory """ # Free any device memory allocated manually if not cutlass.use_rmm: for attr in ["destination_buffer", "source_buffer"]: if hasattr(self, attr): buf = getattr(self, attr) if isinstance(buf, DevicePtrWrapper): err, = cudart.cudaFree(buf.ptr) if err != cudart.cudaError_t.cudaSuccess: raise RuntimeError(f"cudaFree failed with error {err}") del buf class ReductionRT(ExecutableOperation): """ ReductionRT manages the CUTLASS runtime components for reduction """ KernelTemplate = r""" extern "C" __global__ void ${operation_name}(${operation_name}${operation_suffix}::Params params) { // Dynamic shared memory base pointer extern __shared__ int SharedStorageBase[]; // Declare pointer to dynamic shared memory. 
${operation_name}${operation_suffix}::SharedStorage *shared_storage = reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase); ${operation_name}${operation_suffix} op; op(params, *shared_storage); } """ HostTemplate = r""" extern "C" { // Get the size of params in bytes int ${operation_name}_get_param_size(){ return sizeof(${operation_name}${operation_suffix}::Params); } // Get the size of dynamic shared memory in bytes int ${operation_name}_shared_memory_size() { return int(sizeof(${operation_name}${operation_suffix}::SharedStorage)); } // Get the params as byte array char* ${operation_name}_get_params(${operation_name}${operation_suffix}::Params* params){ char *bytes = ((char*)(params)); char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)]; for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++) output[i] = bytes[i]; return output; } } """ def __init__(self, operation: ReductionOperation): super().__init__(operation) self.operation: ReductionOperation = operation self.emitter = EmitReductionInstance("_type") self.elements_per_access = self.operation.count ( self.argument_type, self.epilogue_type, ) = get_reduction_params(operation.epilogue_functor) self.argtype = [ctypes.POINTER(self.argument_type)] def emit(self): return self.emitter.emit(self.operation) def plan(self, arguments: ReductionArguments): block_shape = [ self.operation.shape.column // self.elements_per_access, self.operation.shape.row, 1, ] grid_shape = [ (arguments.problem_size.row + self.operation.shape.row - 1) // self.operation.shape.row, (arguments.problem_size.column + self.operation.shape.column - 1) // self.operation.shape.column, 1, ] return LaunchConfiguration( grid_shape, block_shape, self.shared_memory_capacity, ) def initialize(self): (err,) = cuda.cuFuncSetAttribute( self.kernel, attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, value=self.shared_memory_capacity, ) if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError(f"CUDA Error: {err}") class ReductionOperation: """ CUTLASS reduction Operation """ def __init__( self, shape: MatrixCoord, C: TensorDescription, element_accumulator, element_workspace=None, element_compute=None, epilogue_functor=None, count: int = 1, partitions_per_stage: int = 4, ) -> None: self.shape = shape self.epilogue_functor = epilogue_functor self.element_accumulator = element_accumulator if element_workspace is None: self.element_workspace = element_accumulator else: self.element_workspace = element_workspace if element_compute is None: self.element_compute = element_accumulator else: self.element_compute = element_compute self.element_output = C.element self.C: TensorDescription = C # Reduce op processing size self.count: int = count # Number of partitions to reduce per stage self.partitions_per_stage: int = partitions_per_stage self.rt_module: ReductionRT = ReductionRT(self) self.argument_type = self.rt_module.argument_type self.epilogue_type = self.rt_module.epilogue_type def extended_name(self): extend_name = "${element_workspace}_${element_accumulator}_${element_compute}_${element_output}" return SubstituteTemplate( extend_name, { "element_workspace": DataTypeNames[self.element_workspace], "element_accumulator": DataTypeNames[self.element_accumulator], "element_compute": DataTypeNames[self.element_compute], "element_output": DataTypeNames[self.element_output], }, ) def configuration_name(self): """The full procedural name indicates architecture, extended 
name, tile size""" configuration_name = "cutlass_reduce_split_k_${extended_name}_${threadblock}" threadblock = "%dx%d" % ( self.shape.row, self.shape.column, ) return SubstituteTemplate( configuration_name, { "extended_name": self.extended_name(), "threadblock": threadblock, }, ) def procedural_name(self): """The full procedural name indicates architecture, extended name, tile size""" return self.configuration_name() def run(self, arguments: ReductionArguments) -> cuda.CUresult: """ Configure and launch the cuda kernel with input arguments """ launch_config = self.rt_module.plan(arguments) host_workspace = arguments.host_workspace device_workspace = None err = self.rt_module.run( host_workspace, device_workspace, launch_config, arguments.stream ) if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError(f"CUDA Error {str(err)}") return err class EmitReductionInstance: def __init__(self, operation_suffix="") -> None: self.operation_suffix = operation_suffix self.includes = [ "cutlass/cutlass.h", "cutlass/numeric_types.h", "cutlass/arch/arch.h", "cutlass/arch/mma.h", "cutlass/layout/matrix.h", "cutlass/gemm/device/gemm.h", "cutlass/gemm/device/gemm_universal_adapter.h", "cutlass/gemm/kernel/default_gemm_universal.h", "cutlass/reduction/kernel/reduce_split_k.h", "cutlass/reduction/thread/reduction_operators.h", ] self.template = """ // Reduction kernel instance using ${operation_name}_base = typename cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<${shape_row}, ${shape_column}>, ${epilogue_functor}, cutlass::reduction::thread::ReduceAdd< ${element_accumulator}, ${element_output}, ${count}>, ${partition_per_stage}>; struct ${operation_name}${operation_suffix}: public ${operation_name}_base { }; """ def emit(self, operation: ReductionOperation): vector_length_bits = min(operation.C.alignment * DataTypeSize[operation.C.element], 128) epilogue_vector_length = vector_length_bits // DataTypeSize[operation.C.element] values = { "operation_name": operation.configuration_name(), "operation_suffix": self.operation_suffix, "shape_row": str(operation.shape.row), "shape_column": str(operation.shape.column), "epilogue_functor": operation.epilogue_functor.emit(), "element_output": DataTypeTag[operation.element_output], "epilogue_vector_length": str(epilogue_vector_length), "element_accumulator": DataTypeTag[operation.element_accumulator], "element_compute": DataTypeTag[operation.element_compute], "element_workspace": DataTypeTag[operation.element_workspace], "count": str(operation.count), "partition_per_stage": str(operation.partitions_per_stage), } return SubstituteTemplate(self.template, values)
cutlass/python/cutlass/backend/reduction_operation.py/0
{ "file_path": "cutlass/python/cutlass/backend/reduction_operation.py", "repo_id": "cutlass", "token_count": 6773 }
44
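The reduction_operation.py entry above derives its kernel launch shape and split-K workspace stride with a few lines of integer arithmetic (ReductionRT.plan and ReductionArguments). The plain-Python sketch below reproduces that arithmetic in isolation so it can be checked by hand; the function name and the example numbers (4x32 tile, 32-bit output element, 512x256 problem) are illustrative assumptions, not part of the CUTLASS Python API.

# Minimal sketch of the launch-shape and partition-stride arithmetic used by
# ReductionRT.plan and ReductionArguments above. All names and example numbers
# are illustrative only.
def reduction_launch_shapes(problem_rows, problem_cols, tile_rows, tile_cols,
                            elements_per_access, element_bits):
    # One thread per vectorized access along the column dimension of the tile.
    block = (tile_cols // elements_per_access, tile_rows, 1)
    # Ceiling division of the problem extent by the tile extent.
    grid = ((problem_rows + tile_rows - 1) // tile_rows,
            (problem_cols + tile_cols - 1) // tile_cols,
            1)
    # Byte stride between consecutive split-K partials in the workspace,
    # computed from the output element width as in ReductionArguments.
    partition_stride = problem_rows * problem_cols * element_bits // 8
    return grid, block, partition_stride

if __name__ == "__main__":
    grid, block, stride = reduction_launch_shapes(512, 256, 4, 32, 4, 32)
    print(grid, block, stride)  # (128, 8, 1) (8, 4, 1) 524288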
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utilities for expressing shapes """ from cutlass_library import ( ConvMode, ConvKind, LayoutType ) from cutlass.backend.c_types import ( Conv2DProblemSize_, GemmCoord_, GemmCoordBatched_ ) class MatrixCoord: def __init__(self, row, col): self._row = row self._col = col @property def row(self): return self._row @property def column(self): return self._col def leading_dimension(self, layout: LayoutType) -> int: """ Returns the leading dimension for a matrix with layout ``layout`` and shape provided by the MatrixCoord. 
:param layout: layout of matrix :type layout: cutlass_library.LayoutType :returns: leading dimension :rtype: int """ if layout == LayoutType.RowMajor: return self._col elif layout == LayoutType.ColumnMajor: return self._row else: raise Exception(f'Unsupported layout for leading dimension calculation: {layout}') class GemmCoord: def __init__(self, m: int, n: int, k: int): self._m = m self._n = n self._k = k @property def m(self) -> int: return self._m @property def n(self) -> int: return self._n @property def k(self) -> int: return self._k @property def mk(self) -> MatrixCoord: return MatrixCoord(self._m, self._k) @property def mn(self) -> MatrixCoord: return MatrixCoord(self._m, self._n) @property def kn(self) -> MatrixCoord: return MatrixCoord(self._k, self._n) @property def ctype(self) -> GemmCoord_: return GemmCoord_(self._m, self._n, self._k) def batched_ctype(self, batch_count: int) -> GemmCoordBatched_: return GemmCoordBatched_(self._m, self._n, self._k, batch_count) class Conv2DProblemSize: def __init__( self, n: int, h: int, w: int, c: int, k: int, r: int, s: int, c_: int, pad_h: int, pad_w: int, stride_h: int, stride_w: int, dilation_h: int, dilation_w: int, mode: ConvMode=ConvMode.CrossCorrelation, split_k_slices: int=1, groups: int=1): self.N = n self.H = h self.W = w self.C = c self.K = k self.R = r self.S = s self.pad_h = pad_h self.pad_w = pad_w self.stride_h = stride_h self.stride_w = stride_w self.dilation_h = dilation_h self.dilation_w = dilation_w self.mode = int(mode) self.split_k_slices = split_k_slices self.groups = groups self.P = ((h + pad_h * 2 - r * dilation_h) // stride_h) + 1 self.Q = ((w + pad_w * 2 - s * dilation_w) // stride_w) + 1 @property def ctype(self) -> Conv2DProblemSize_: return Conv2DProblemSize_(self) def implicit_gemm_size(self, kind: ConvKind): if kind == ConvKind.Fprop: return GemmCoord( self.N * self.P * self.Q, self.K, self.R * self.S * self.C // self.groups ) elif kind == ConvKind.Dgrad: return GemmCoord( self.N * self.H * self.W, self.C, self.R * self.S * self.K ) elif kind == ConvKind.Wgrad: return GemmCoord( self.K, self.R * self.S * self.C, self.N * self.P * self.Q ) @staticmethod def from_sizes(input_size, weight_size): K, R, S, _ = weight_size pad_h = R // 2 pad_w = S // 2 stride_h = 1 stride_w = 1 dilation_h = 1 dilation_w = 1 return Conv2DProblemSize( *input_size, *weight_size, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w )
cutlass/python/cutlass/shape.py/0
{ "file_path": "cutlass/python/cutlass/shape.py", "repo_id": "cutlass", "token_count": 2440 }
45
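The shape.py entry above computes the convolution output extent (P, Q) and maps a Conv2d problem onto an implicit GEMM. As a quick check of those formulas, here is a self-contained sketch for the Fprop case; the function name and example sizes are illustrative assumptions, and only the arithmetic mirrors Conv2DProblemSize and implicit_gemm_size.

# Standalone sketch of the Conv2DProblemSize arithmetic above for the Fprop case:
# output extent (P, Q), then the implicit-GEMM problem size (M, N, K).
def fprop_implicit_gemm(n, h, w, c, k, r, s,
                        pad_h=1, pad_w=1, stride_h=1, stride_w=1,
                        dilation_h=1, dilation_w=1, groups=1):
    # Same output-extent formula as Conv2DProblemSize.__init__
    p = ((h + pad_h * 2 - r * dilation_h) // stride_h) + 1
    q = ((w + pad_w * 2 - s * dilation_w) // stride_w) + 1
    # Fprop maps to GEMM with M = N*P*Q, N = K, K = R*S*C / groups
    return (n * p * q, k, r * s * c // groups)

if __name__ == "__main__":
    # A 1x56x56x64 activation against 128 3x3x64 filters, padding 1, stride 1
    print(fprop_implicit_gemm(n=1, h=56, w=56, c=64, k=128, r=3, s=3))  # (3136, 128, 576)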
# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = source BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
cutlass/python/docs_src/Makefile/0
{ "file_path": "cutlass/python/docs_src/Makefile", "repo_id": "cutlass", "token_count": 252 }
46
.. CUTLASS Python interface documentation master file, created by sphinx-quickstart on Mon Feb 13 17:57:39 2023. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. .. include:: ../../README.md :start-line: 1 :parser: markdown .. toctree:: :hidden: Home <self> .. toctree:: :hidden: :caption: Getting Started: install.md Getting Started <externals/00_basic_gemm.nblink> contribute.md .. toctree:: :hidden: :caption: Python Documentation: modules.rst .. toctree:: :hidden: :caption: Examples and Tutorials: examples.rst .. toctree:: :hidden: :caption: Advanced: .. toctree:: :hidden: :caption: FAQ: .. toctree:: :hidden: :caption: Reference: Github <https://github.com/NVIDIA/cutlass> Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`
cutlass/python/docs_src/source/index.rst/0
{ "file_path": "cutlass/python/docs_src/source/index.rst", "repo_id": "cutlass", "token_count": 351 }
47
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <iostream> #include <iomanip> #include <utility> #include <cute/container/array_subbyte.hpp> #include <cute/tensor.hpp> #include <cute/numeric/numeric_types.hpp> TEST(CuTe_core, ArraySubbyte) { using namespace cute; { array_subbyte<int4_t, 10> array0{}; array_subbyte<int4_t, 5> array1{}; fill(array0, int4_t(0)); fill(array1, int4_t(1)); for (size_t i = 0; i < array1.size(); ++i) { array0[i+5] = array1[i]; } EXPECT_EQ(int4_t(array0.back()), int4_t(1)); for (size_t i = 0; i < array1.size(); ++i) { EXPECT_EQ(int4_t(array0[i]), int4_t(int(i) / 5)); } } { array_subbyte<uint8_t, 14> a{}; //std::cout << sizeof_bits<decltype(a)>::value << std::endl; EXPECT_EQ(cute::sizeof_bits_v<decltype(a)>, 14*8); fill(a, uint8_t(13)); for (int i = 0; i < int(a.size()); ++i) { //std::cout << i << ": " << int(a[i]) << " -> "; EXPECT_EQ(a[i], uint8_t(13)); a[i] = uint8_t(i); //std::cout << int(a[i]) << std::endl; EXPECT_EQ(a[i], uint8_t(i)); } //std::cout << std::endl; } { array_subbyte<int4_t, 14> a{}; //std::cout << sizeof_bits<decltype(a)>::value << std::endl; EXPECT_EQ(cute::sizeof_bits_v<decltype(a)>, 14/2*8); fill(a, int4_t(-5)); for (int i = 0; i < int(a.size()); ++i) { //std::cout << i << ": " << int4_t(a[i]) << " -> "; EXPECT_EQ(int4_t(a[i]), int4_t(-5)); a[i] = int4_t(i); //std::cout << int4_t(a[i]) << std::endl; EXPECT_EQ(int4_t(a[i]), int4_t(i)); } //std::cout << std::endl; } { array_subbyte<uint2_t, 14> a{}; //std::cout << sizeof_bits<decltype(a)>::value << std::endl; EXPECT_EQ(cute::sizeof_bits_v<decltype(a)>, 4*8); fill(a, uint2_t(-5)); for (int i = 0; i < int(a.size()); ++i) { //std::cout << i << ": " << uint2_t(a[i]) << " -> "; EXPECT_EQ(uint2_t(a[i]), uint2_t(-5)); a[i] = uint2_t(i); 
//std::cout << uint2_t(a[i]) << std::endl; EXPECT_EQ(uint2_t(a[i]), uint2_t(i)); } //std::cout << std::endl; } { array_subbyte<bool, 14> a{}; //std::cout << sizeof_bits<decltype(a)>::value << std::endl; EXPECT_EQ(cute::sizeof_bits_v<decltype(a)>, 2*8); fill(a, bool(1)); for (int i = 0; i < int(a.size()); ++i) { //std::cout << i << ": " << bool(a[i]) << " -> "; EXPECT_EQ(a[i], bool(1)); a[i] = bool(i % 2); //std::cout << bool(a[i]) << std::endl; EXPECT_EQ(a[i], bool(i % 2)); } //std::cout << std::endl; } } TEST(CuTe_core, Subbyte_iterator) { using namespace cute; { array_subbyte<uint8_t, 15> a{}; auto tensor = make_tensor(subbyte_iterator<uint8_t>(a.raw_data()), make_shape(15)); fill(a, uint8_t(13)); for (int i = 0; i < int(a.size()); ++i) { EXPECT_EQ(uint8_t(tensor(i)), 13); tensor(i) = uint8_t(i); EXPECT_EQ(a[i], uint8_t(tensor(i))); } } { array_subbyte<int4_t, 15> a{}; auto tensor = make_tensor(subbyte_iterator<int4_t>(a.raw_data()), make_shape(15)); fill(a, int4_t(-5)); for (int i = 0; i < int(a.size()); ++i) { EXPECT_EQ(int4_t(tensor(i)), int4_t(-5)); tensor(i) = int4_t(i); EXPECT_EQ(int4_t(a[i]), int4_t(tensor(i))); } } { array_subbyte<uint2_t, 15> a{}; auto tensor = make_tensor(subbyte_iterator<uint2_t>(a.raw_data()), make_shape(15)); fill(a, uint2_t(-5)); for (int i = 0; i < int(a.size()); ++i) { EXPECT_EQ(uint2_t(tensor(i)), uint2_t(-5)); tensor(i) = uint2_t(i); EXPECT_EQ(uint2_t(a[i]), uint2_t(tensor(i))); } } { array_subbyte<bool, 15> a{}; auto tensor = make_tensor(subbyte_iterator<bool>(a.raw_data()), make_shape(15)); fill(a, bool(1)); for (int i = 0; i < int(a.size()); ++i) { EXPECT_EQ(bool(tensor(i)), bool(1)); tensor(i) = bool(i % 2); EXPECT_EQ(a[i], bool(tensor(i))); } } } TEST(CuTe_core, Const_subbyte_iterator) { using namespace cute; { array_subbyte<uint8_t, 15> a{}; auto tensor = make_tensor(subbyte_iterator<uint8_t const>(a.raw_data()), make_shape(15)); fill(a, uint8_t(13)); for (int i = 0; i < int(a.size()); ++i) { EXPECT_EQ(uint8_t(tensor(i)), 13); a[i] = uint8_t(i); EXPECT_EQ(a[i], uint8_t(tensor(i))); } } { array_subbyte<int4_t, 15> a{}; auto tensor = make_tensor(subbyte_iterator<int4_t const>(a.raw_data()), make_shape(15)); fill(a, int4_t(-5)); for (int i = 0; i < int(a.size()); ++i) { EXPECT_EQ(int4_t(tensor(i)), int4_t(-5)); a[i] = int4_t(i); EXPECT_EQ(int4_t(a[i]), int4_t(tensor(i))); } } { array_subbyte<uint2_t, 15> a{}; auto tensor = make_tensor(subbyte_iterator<uint2_t const>(a.raw_data()), make_shape(15)); fill(a, uint2_t(-5)); for (int i = 0; i < int(a.size()); ++i) { EXPECT_EQ(uint2_t(tensor(i)), uint2_t(-5)); a[i] = uint2_t(i); EXPECT_EQ(uint2_t(a[i]), uint2_t(tensor(i))); } } { array_subbyte<bool, 15> a{}; auto tensor = make_tensor(subbyte_iterator<bool const>(a.raw_data()), make_shape(15)); fill(a, bool(1)); for (int i = 0; i < int(a.size()); ++i) { EXPECT_EQ(bool(tensor(i)), bool(1)); a[i] = bool(i % 2); EXPECT_EQ(a[i], bool(tensor(i))); } } }
cutlass/test/unit/cute/core/array_subbyte.cpp/0
{ "file_path": "cutlass/test/unit/cute/core/array_subbyte.cpp", "repo_id": "cutlass", "token_count": 3076 }
48
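The array_subbyte.cpp entry above asserts, among other things, that fourteen int4_t elements occupy seven bytes and round-trip their values. The sketch below is a conceptual Python illustration of that sub-byte packing, not the CuTe implementation; the nibble ordering chosen here (low nibble for even indices) is an assumption made for the example.

# Conceptual illustration (not the CuTe implementation) of sub-byte packing:
# two 4-bit elements share one byte, so 14 int4 values fit in 7 bytes, and
# signed values are stored as two's-complement nibbles.
def pack_int4(values):
    out = bytearray((len(values) + 1) // 2)
    for i, v in enumerate(values):
        nibble = v & 0xF                       # two's-complement nibble
        byte, slot = divmod(i, 2)
        out[byte] |= nibble << (4 * slot)      # assumed order: low nibble first
    return out

def unpack_int4(packed, count):
    values = []
    for i in range(count):
        byte, slot = divmod(i, 2)
        nibble = (packed[byte] >> (4 * slot)) & 0xF
        values.append(nibble - 16 if nibble >= 8 else nibble)  # sign-extend
    return values

if __name__ == "__main__":
    data = [-5] * 14
    packed = pack_int4(data)
    assert len(packed) == 7                    # 14 elements * 4 bits = 7 bytes
    assert unpack_int4(packed, 14) == data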
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests Generic CuTe Layouts */ #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/layout/layout.h" #include "cutlass/matrix_coord.h" // Cute includes #include <cute/layout.hpp> #include <cute/int_tuple.hpp> using namespace cutlass; using namespace cute; namespace test { namespace layout { template <typename GenericLayout, typename Layout> struct Testbed { Testbed() {} bool run() { GenericLayout generic_layout; Layout layout = Layout::packed({size<0>(generic_layout), size<1>(generic_layout)}); for (int m = 0; m < size<0>(generic_layout); m++) { for (int n = 0; n < size<1>(generic_layout); n++) { if (generic_layout(m, n) != layout({m, n})) return false; } } return true; } }; } } ////////////////////////////////////////////////////////////////////////// // Test Generic CuTe Layouts ////////////////////////////////////////////////////////////////////////// /// Canonical Layouts TEST(GenericLayout, ColumnMajor) { using GenericLayout = cute::Layout<Shape<_8, _4>, Stride<_1, _8>>; using Layout = cutlass::layout::ColumnMajor; test::layout::Testbed<GenericLayout, Layout> testbed; EXPECT_TRUE(testbed.run()); } ////////////////////////////////////////////////////////////////////////// TEST(GenericLayout, RowMajor) { using GenericLayout = cute::Layout<Shape<_8, _4>, Stride<_4, _1>>; using Layout = cutlass::layout::RowMajor; test::layout::Testbed<GenericLayout, Layout> testbed; EXPECT_TRUE(testbed.run()); } ////////////////////////////////////////////////////////////////////////// /// Swizzle Shared Memory layouts TEST(GenericLayout, RowMajorTensorOpMultiplicandCrosswise) { using GenericLayout = decltype( composition( Swizzle<3,3,3>{}, Layout<Shape<_128, _64>, Stride<_64, _1>>{}) ); using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<cutlass::half_t>::value, 64>; test::layout::Testbed<GenericLayout, Layout> testbed; EXPECT_TRUE(testbed.run()); } ////////////////////////////////////////////////////////////////////////// TEST(GenericLayout, ColumnMajorTensorOpMultiplicandCongruous) { using GenericLayout = decltype( composition( Swizzle<3,3,4>{}, Layout<Shape<_128, _64>>{}) ); using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<cutlass::half_t>::value, 64>; test::layout::Testbed<GenericLayout, Layout> testbed; EXPECT_TRUE(testbed.run()); } //////////////////////////////////////////////////////////////////////////
cutlass/test/unit/cute/layout/layout_operator.cu/0
{ "file_path": "cutlass/test/unit/cute/layout/layout_operator.cu", "repo_id": "cutlass", "token_count": 1383 }
49
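The layout_operator.cu entry above verifies that a generic CuTe (shape, stride) layout produces the same linear offsets as the corresponding canonical CUTLASS layout. The check reduces to a dot product of coordinates with strides; the short sketch below replays it in plain Python for the two canonical 8x4 cases, with illustrative function names.

# Plain-Python replay of the offset check in the Testbed above: a (shape, stride)
# layout maps (m, n) to m*stride_m + n*stride_n, and the canonical column-major /
# row-major layouts are particular stride choices.
def layout_offset(m, n, stride_m, stride_n):
    return m * stride_m + n * stride_n

def matches(rows, cols, strides, reference):
    return all(layout_offset(m, n, *strides) == reference(m, n)
               for m in range(rows) for n in range(cols))

if __name__ == "__main__":
    rows, cols = 8, 4
    # Column-major: strides (1, rows); reference offset m + n * leading_dimension
    assert matches(rows, cols, (1, rows), lambda m, n: m + n * rows)
    # Row-major: strides (cols, 1); reference offset m * leading_dimension + n
    assert matches(rows, cols, (cols, 1), lambda m, n: m * cols + n)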
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/thread/linear_combination_drelu.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h" #include "cutlass/epilogue/threadblock/epilogue_with_reduction.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "epilogue_with_reduction_testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // // Disable selected tests on CUDA 11.1 // // #define ENABLE_BLOCKED_TESTS (!(__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ == 1)) ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_64x64_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_64x64_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< 
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // 
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x64_64x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// #if ENABLE_BLOCKED_TESTS TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x64_64x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, 
ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_64x128_32x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_64x128_32x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = 
cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x256_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x256_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename 
cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_256x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<256, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_256x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<256, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu< ElementAccumulator, ElementAccumulator, ElementOutput, ElementOutput, kElementsPerAccess >; using ReductionOp = cutlass::plus<ElementAccumulator>; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, 
ElementOutput, OutputOp, ReductionOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueWithReductionTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/epilogue/threadblock/epilogue_with_reduction_tensor_op.cu/0
{ "file_path": "cutlass/test/unit/epilogue/threadblock/epilogue_with_reduction_tensor_op.cu", "repo_id": "cutlass", "token_count": 8687 }
50
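Every test in the epilogue_with_reduction_tensor_op.cu entry above sets kElementsPerAccess to 128 / sizeof_bits(ElementOutput), i.e. it sizes the epilogue for 128-bit vectorized accesses. The sketch below just spells out that arithmetic; the bit widths are listed by hand for illustration.

# The access-width arithmetic repeated throughout the tests above: a 128-bit
# store holds 8 half-precision outputs or 4 single-precision outputs.
SIZEOF_BITS = {"half_t": 16, "float": 32}  # bit widths written out for illustration

def elements_per_access(element_output, access_bits=128):
    return access_bits // SIZEOF_BITS[element_output]

if __name__ == "__main__":
    assert elements_per_access("half_t") == 8
    assert elements_per_access("float") == 4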
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide GEMM interface with: A: row major, of type FE4M3 or FE5M2 B: column major, of type FE4M3 or FE5M2 C: row major, of FE4M3 or FE5M2 Accum: F32 */ #include <iostream> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/linear_combination_generic_with_scaling.h" #include "cutlass/gemm/device/gemm_universal_with_absmax.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "testbed.h" #include "testbed_with_absmax.h" #if defined(CUTLASS_ARCH_MMA_SM89_SUPPORTED) //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) { using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_fastacc_128x256x64_64x64x64) { using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; static int const kAlignment = 16; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages, kAlignment, kAlignment, cutlass::arch::OpMultiplyAddFastAccum >; bool passed =
test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, relu_128x256x64_64x64x64) { using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::ReLu, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::ReLu>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe4m3t_fe5m2n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) { using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e5m2_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe5m2t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) { using ElementA = cutlass::float_e5m2_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / 
cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe5m2t_fe5m2n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) { using ElementA = cutlass::float_e5m2_t; using ElementB = cutlass::float_e5m2_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe5m2t_tensor_op_f32, identity_128x256x64_64x64x64) { using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e5m2_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe5m2t_fe5m2n_fe5m2t_tensor_op_f32, identity_diff_aux_output_types_128x256x64_64x64x64) { using 
ElementA = cutlass::float_e5m2_t; using ElementB = cutlass::float_e5m2_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = cutlass::float_e5m2_t; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x128x64_32x64x64) { using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 128, 64>, cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_noScale_128x256x64_64x64x64) { using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, 
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>( /* scaleA = */false, /* scaleB = */false, /* scaleC = */false ); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_noAux_128x256x64_64x64x64) { using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = float; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::Identity, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages >; bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(); EXPECT_TRUE(passed); } //////////////////////////////////////////////////////////////////////////////// #endif // CUTLASS_ARCH_MMA_SM89_SUPPORTED
cutlass/test/unit/gemm/device/gemm_f8t_f8n_f8t_tensor_op_f32_sm89.cu/0
{ "file_path": "cutlass/test/unit/gemm/device/gemm_f8t_f8n_f8t_tensor_op_f32_sm89.cu", "repo_id": "cutlass", "token_count": 6677 }
51
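The SM89 tests above exercise cutlass::gemm::device::GemmUniversalWithAbsMax, whose epilogue scales operands and records the absolute maximum of the result so that a downstream FP8 kernel can derive its quantization scale. The following is only a minimal host-side sketch of that idea, not the library's epilogue: the exact placement of the per-tensor scale factors inside LinearCombinationGenericWithScalingAndAbsMax is assumed, and plain float stands in for the FP8 types (e4m3's largest finite value, 448, is used as the saturation bound).

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical host-side model of an "absmax" epilogue step (assumption,
// not the CUTLASS code path): d = act(alpha * accum + beta * c), amax is
// the running maximum of |d| before conversion, and the stored value is
// saturated into the FP8 e4m3 finite range [-448, +448].
float run_absmax_epilogue(std::vector<float> const &accum,
                          std::vector<float> const &c,
                          std::vector<float> &d,
                          float alpha, float beta) {
  float amax = 0.0f;
  for (size_t i = 0; i < accum.size(); ++i) {
    float v = alpha * accum[i] + beta * c[i];          // identity activation
    amax = std::max(amax, std::fabs(v));
    d[i] = std::min(std::max(v, -448.0f), 448.0f);     // e4m3 max finite = 448
  }
  return amax;
}

int main() {
  std::vector<float> accum = {1.5f, -600.0f, 12.0f};
  std::vector<float> c(3, 0.25f), d(3);
  float amax = run_absmax_epilogue(accum, c, d, 1.0f, 1.0f);
  std::printf("amax = %f, d[1] (saturated) = %f\n", amax, d[1]);
  return 0;
}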
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide GEMM interface with an elementwise tensor-tensor broadcast epilogue */ #include <iostream> #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cute/atom/mma_atom.hpp" #include "cutlass/numeric_types.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp" #include "cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp" #include "../../common/cutlass_unit_test.h" #include "gemm_testbed_3x_tensor_broadcast.hpp" #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) using namespace cute; /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32_tensor_broadcast, 128x128x128_2x2x1_ActReLU_Bin0Mul_Bin1Plus_UnaryHardSwish) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using ElementOutput = int32_t; using ElementAccumulator = ElementOutput; using ElementCompute = ElementOutput; using ElementBias = ElementOutput; using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, int8_t, LayoutA, 16, int8_t, LayoutB, 16, int32_t, Shape<_128,_128,_128>, Shape<_2,_2,_1>, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< cutlass::epilogue::collective::EpilogueTensorBroadcast< cutlass::gemm::TagToStrideC_t<LayoutC>, cutlass::gemm::TagToStrideC_t<LayoutC>, cutlass::epilogue::thread::LinearCombinationTensorBroadcast< ElementOutput, ElementAccumulator, ElementCompute, ElementBias, cutlass::epilogue::thread::ReLu, cutlass::multiplies, cutlass::plus, cutlass::epilogue::thread::HardSwish >, cutlass::gemm::EpilogueDefault>>; EXPECT_TRUE(EpilogueOp::IsBinaryOp0Enabled); EXPECT_TRUE(EpilogueOp::IsBinaryOp1Enabled); EXPECT_TRUE(EpilogueOp::IsUnaryOpEnabled); using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveOp, EpilogueOp >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
cutlass/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32_tensor_broadcast.cu/0
{ "file_path": "cutlass/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32_tensor_broadcast.cu", "repo_id": "cutlass", "token_count": 1527 }
52
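This test builds an EpilogueTensorBroadcast that chains an activation, two broadcasted binary operations, and a final unary operation. The scalar model below is only a sketch of the assumed composition order, unary(bin1(bin0(act(alpha*acc + bias), c0), c1)), inferred from the template argument names rather than taken from the CUTLASS implementation; where alpha/beta and the broadcast operands enter is part of that assumption, and float is used for clarity even though the test runs the int32_t path.

#include <algorithm>
#include <cstdio>

// Scalar model (float for clarity; the test above uses int32_t).
float relu(float x) { return std::max(x, 0.0f); }
float hard_swish(float x) { return x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) / 6.0f; }

// Assumed composition: unary(bin1(bin0(act(alpha*acc + bias), c0), c1)).
float tensor_broadcast_epilogue(float acc, float alpha, float bias, float c0, float c1) {
  float z = relu(alpha * acc + bias);  // ActivationFn = ReLu
  z = z * c0;                          // BinaryOp0 = multiplies, broadcast tensor C0
  z = z + c1;                          // BinaryOp1 = plus, broadcast tensor C1
  return hard_swish(z);                // UnaryOp = HardSwish
}

int main() {
  std::printf("%f\n", tensor_broadcast_epilogue(2.0f, 1.0f, 0.5f, 3.0f, -1.0f));
  return 0;
}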
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide Rank 2k update interface */ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/blas3.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/reference/host/rank_k_complex.h" #include "testbed_utils.h" namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename RankK> struct TestbedRank2KUniversal { using ElementA = typename RankK::ElementA; using ElementC = typename RankK::ElementC; using ElementAccumulator = typename RankK::ElementAccumulator; using ElementCompute = typename RankK::RankKkernel::Epilogue::OutputOp::ElementCompute; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<typename RankK::ElementA, typename RankK::LayoutA> tensor_A; cutlass::HostTensor<typename RankK::ElementC, typename RankK::LayoutC> tensor_C; cutlass::HostTensor<typename RankK::ElementC, typename RankK::LayoutC> tensor_D; cutlass::HostTensor<typename RankK::ElementC, typename RankK::LayoutC> reference_D; // // Methods // TestbedRank2KUniversal( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename RankK::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { EXPECT_TRUE(false) << "Input distribution not implemented"; return false; } return true; } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_symmetric_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename RankK::ElementC>::value; if (bits_input == 1) { scope_max = 2; 
scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillSymmetricRandomUniform( view, seed, RankK::kFillModeC, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillSymmetricRandomGaussian( view, seed, RankK::kFillModeC, 0, 0.5, mantissa_in_bits); } else { EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented"; return false; } return true; } /// Initializes data structures void initialize(cutlass::gemm::GemmCoord problem_size) { // // Allocate the RankK workspace // tensor_A.resize(problem_size.mk()); tensor_C.resize(problem_size.mn()); tensor_D.resize(problem_size.mn()); reference_D.resize(problem_size.mn(), false); EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits<typename RankK::ElementA>::bits)); EXPECT_TRUE(initialize_symmetric_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits<typename RankK::ElementC>::bits)); // It is possible to randomly initialize to all zeros, so override this with non-zeros // in the upper left corner of each operand. tensor_A.host_view().at({0, 0}) = typename RankK::ElementA(1); tensor_C.host_view().at({0, 0}) = typename RankK::ElementC(1); cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); tensor_A.sync_device(); tensor_C.sync_device(); tensor_D.sync_device(); } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { tensor_D.sync_host(); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); if (tensor_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); if (reference_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view()); bool passed = l2_norm < cutlass::MantissaInBits<typename RankK::ElementA>::error; return passed; } /// Verifies the result is a RankK bool verify( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { // // Verify // cutlass::reference::host::Rank2KComplex< typename RankK::ElementA, typename RankK::LayoutA, typename RankK::ElementC, typename RankK::LayoutC, ElementCompute, ElementAccumulator >( problem_size, alpha, tensor_A.host_ref(), RankK::kTransformA, beta, tensor_C.host_ref(), reference_D.host_ref(), ElementAccumulator(0), RankK::kFillModeC, RankK::kBlasMode ); return compare_reference(problem_size, alpha, beta); } /// Returns true if the CUDA device is sufficient to execute the kernel. 
bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename RankK::RankKkernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerBlockOptin < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmUniversalMode mode, cutlass::gemm::GemmCoord problem_size, int batch_count = 1, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." << std::endl; } return true; } #if 0 std::cout << "[TestbedRankKUniversal::run()] problem(m, n, k): " << problem_size << " alpha: " << ElementCompute(alpha) << " beta: " << ElementCompute(beta) << std::endl; #endif this->initialize(problem_size); // // Initialize the RankK operator // typename RankK::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, tensor_A.device_data(), tensor_C.device_data(), tensor_D.device_data(), problem_size.n() * problem_size.k(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), tensor_A.layout().stride(0), tensor_C.layout().stride(0), tensor_D.layout().stride(0) }; RankK rank2k_op; size_t workspace_size = RankK::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = rank2k_op.initialize(arguments, workspace.get()); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Run the RankK // status = rank2k_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Verify // bool passed = this->verify(problem_size, alpha, beta); //if (true) { if (!passed) { std::stringstream fname; fname << "error_RankK_device_" << "fill_mode_c_" << (RankK::kFillModeC == cutlass::FillMode::kLower ? "lower_" : (RankK::kFillModeC == cutlass::FillMode::kUpper ? 
"upper_" : "invalid_")) << "mnk_" << problem_size.m() << "x" << problem_size.n() << "x" << problem_size.k() << "_" << RankK::ThreadblockShape::kM << "x" << RankK::ThreadblockShape::kN << "x" << RankK::ThreadblockShape::kK << "_" << RankK::WarpShape::kM << "x" << RankK::WarpShape::kN << "x" << RankK::WarpShape::kK << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << problem_size << std::endl; results << "\nA:\n" << tensor_A.host_view() << "\n" << "\nC:\n" << tensor_C.host_view() << "\n" << "\nD reference:\n" << reference_D.host_view() << "\n" << "\nD computed:\n" << tensor_D.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename RankK> bool TestRank2kUniversal( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmUniversalMode mode, int batch_count, double alpha = 1.0, double beta = 2.0) { bool passed = true; TestbedRank2KUniversal<RankK> testbed; using ElementCompute = typename RankK::EpilogueOutputOp::ElementCompute; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); return passed; } template <typename RankK> bool TestAllRankKUniversal() { bool passed = true; int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename RankK::ElementA>::value); int const kAlignmentN = 128 / kMinimumOperandElementSize; int const kAlignmentK = 128 / kMinimumOperandElementSize; cutlass::gemm::GemmUniversalMode modes[] = { cutlass::gemm::GemmUniversalMode::kGemm, }; int problem_size_n[] = { kAlignmentN, 512 - 2*kAlignmentN }; int problem_size_k[] = { kAlignmentK, RankK::ThreadblockShape::kK * RankK::kStages - kAlignmentK, RankK::ThreadblockShape::kK * RankK::kStages * 3 - kAlignmentK }; int batch_counts[] = { // may be interpretted as batch count or split-K slices 1 // Just running one batch for now (removing 2, 3, 5, 7) }; double problem_alpha[] = { 1.0 }; double problem_beta[] = { 2.0 }; using ElementCompute = typename RankK::EpilogueOutputOp::ElementCompute; for (cutlass::gemm::GemmUniversalMode mode : modes) { for (int n : problem_size_n) { for (int k : problem_size_k) { for (int batch_count : batch_counts) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { if (mode == cutlass::gemm::GemmUniversalMode::kGemm || mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { } cutlass::gemm::GemmCoord problem_size(n, n, k); TestbedRank2KUniversal<RankK> testbed; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); if (!passed) { return false; } } } } } } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace test /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/gemm/device/testbed_rank_k_universal.h/0
{ "file_path": "cutlass/test/unit/gemm/device/testbed_rank_k_universal.h", "repo_id": "cutlass", "token_count": 6120 }
53
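TestbedRank2KUniversal verifies a device-side rank-k update against cutlass::reference::host::Rank2KComplex, touching only the triangle selected by kFillModeC. As a minimal real-valued analogue (lower fill only; no complex transform, blas mode, or batching, all of which the testbed's reference supports), a naive host rank-k update looks like this:

#include <cstdio>
#include <vector>

// Naive host rank-k update restricted to the lower triangle:
//   C(i,j) = alpha * sum_p A(i,p) * A(j,p) + beta * C(i,j),  for j <= i.
// A is row-major N x K, C is row-major N x N.
void rank_k_update_lower(int n, int k,
                         float alpha, std::vector<float> const &A,
                         float beta, std::vector<float> &C) {
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j <= i; ++j) {
      float acc = 0.0f;
      for (int p = 0; p < k; ++p) {
        acc += A[i * k + p] * A[j * k + p];
      }
      C[i * n + j] = alpha * acc + beta * C[i * n + j];
    }
  }
}

int main() {
  int n = 3, k = 2;
  std::vector<float> A = {1, 2, 3, 4, 5, 6};  // 3x2
  std::vector<float> C(n * n, 1.0f);
  rank_k_update_lower(n, k, 1.0f, A, 2.0f, C);
  std::printf("C(2,1) = %f\n", C[2 * n + 1]);
  return 0;
}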
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" #include "testbed.h" #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) //////////////////////////////////////////////////////////////////////////////// /// F32 <= F16 * I8 + F32 (Upcast on Operand B) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_f16_i8, 128x128x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = cutlass::half_t; using ElementB = int8_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<128, 128, 64> >() .run(); } TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_f16_i8, 64x64x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = cutlass::half_t; using ElementB = int8_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<64, 64, 64> >() .run(); } //////////////////////////////////////////////////////////////////////////////// /// F32 <= I8 * F16 + F32 (Upcast on Operand A) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_i8_f16, 128x128x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = int8_t; using ElementB = cutlass::half_t;; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<128, 128, 64> >() .run(); } TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_i8_f16, 
64x64x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = int8_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<64, 64, 64> >() .run(); } //////////////////////////////////////////////////////////////////////////////// /// F32 <= F16 * U8 + F32 (Upcast on Operand B) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_f16_u8, 64x64x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = cutlass::half_t; using ElementB = uint8_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<64, 64, 64> >() .run(); } TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_f16_u8, 128x128x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = cutlass::half_t; using ElementB = uint8_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<128, 128, 64> >() .run(); } //////////////////////////////////////////////////////////////////////////////// /// F32 <= U8 * F16 + F32 (Upcast on Operand A) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_u8_f16, 64x64x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = uint8_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, 
ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<64, 64, 64> >() .run(); } TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_u8_f16, 128x128x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = uint8_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<128, 128, 64> >() .run(); } //////////////////////////////////////////////////////////////////////////////// /// F32 <= B16 * U8 + F32 (Upcast on Operand B) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_bf16_u8, 64x64x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = cutlass::bfloat16_t; using ElementB = uint8_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<64, 64, 64> >() .run(); } //////////////////////////////////////////////////////////////////////////////// /// F32 <= U8 * BF16 + F32 (Upcast on Operand A) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_u8_bf16, 64x64x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = uint8_t; using ElementB = cutlass::bfloat16_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<64, 64, 64> >() .run(); } //////////////////////////////////////////////////////////////////////////////// /// F32 <= I8 * BF16 + F32 (Upcast on Operand A) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_bf16_i8, 64x64x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = 
cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = cutlass::bfloat16_t; using ElementB = int8_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<64, 64, 64> >() .run(); } //////////////////////////////////////////////////////////////////////////////// /// F32 <= B16 * I8 + F32 (Upcast on Operand B) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_i8_bf16, 64x64x64_64x64x64_16x8x16) { using Shape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; using ElementA = int8_t; using ElementB = cutlass::bfloat16_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementA>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<ElementB>::value, 64>; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type; test::gemm::warp::TransformTestbed<MmaTensorOp, cutlass::gemm::GemmShape<64, 64, 64> >() .run(); } #endif // if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
cutlass/test/unit/gemm/warp/gemm_mixed_input_sm80.cu/0
{ "file_path": "cutlass/test/unit/gemm/warp/gemm_mixed_input_sm80.cu", "repo_id": "cutlass", "token_count": 5606 }
54
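These warp-level tests target cutlass::arch::OpMultiplyAddMixedInputUpcast, in which the narrower integer operand is converted up to the floating-point operand's type before the tensor-core multiply-accumulate, with accumulation in F32. The snippet below is only a scalar model of that numeric contract; plain float stands in for half_t/bfloat16_t, and the real path converts whole register fragments before issuing 16x8x16 MMAs.

#include <cstdint>
#include <cstdio>
#include <vector>

// Scalar sketch of a mixed-input dot product: the int8 operand is upcast
// before the multiply and the products are accumulated in float, mirroring
// the intent of OpMultiplyAddMixedInputUpcast (not its warp-level mechanics).
float mixed_input_dot(std::vector<float> const &a_fp,     // stand-in for half_t / bfloat16_t values
                      std::vector<int8_t> const &b_i8) {
  float acc = 0.0f;
  for (size_t i = 0; i < a_fp.size(); ++i) {
    float b_upcast = static_cast<float>(b_i8[i]);  // upcast happens before the multiply
    acc += a_fp[i] * b_upcast;
  }
  return acc;
}

int main() {
  std::vector<float> a = {0.5f, 1.0f, -2.0f};
  std::vector<int8_t> b = {4, -3, 2};
  std::printf("dot = %f\n", mixed_input_dot(a, b));
  return 0;
}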
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief unit tests for NHWC tensor layout */ #include "../common/cutlass_unit_test.h" #include "cutlass/layout/tensor.h" #include "cutlass/util/device_memory.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace layout { void test_nhwc_layout(int n_size, int h_size, int w_size, int c_size) { int ldc = c_size + 1; int ldw = ldc * (w_size + 2); int ldh = ldw * (h_size + 3); typedef cutlass::layout::TensorNHWC Tensor; Tensor::Stride tensor_stride({ ldc, ldw, ldh }); Tensor tensor_nhw_packed_c(tensor_stride); // test pointer offset for (int n_idx = 0; n_idx < n_size; n_idx++) { for (int p_idx = 0; p_idx < h_size; p_idx++) { for (int q_idx = 0; q_idx < w_size; q_idx++) { for (int c_idx = 0; c_idx < c_size; c_idx++) { cutlass::Tensor4DCoord tensor_coord(n_idx, p_idx, q_idx, c_idx); auto ptr_offset = tensor_nhw_packed_c(tensor_coord); decltype(ptr_offset) reference_offset = c_idx + q_idx * ldc + p_idx * ldw + n_idx * ldh; EXPECT_EQ(ptr_offset, reference_offset); } } } } // test stride auto stride = tensor_nhw_packed_c.stride(); EXPECT_EQ(stride, tensor_stride); // test capacity auto capacity = tensor_nhw_packed_c.capacity( cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); decltype(capacity) referece_capacity = ldh * n_size; EXPECT_EQ(capacity, referece_capacity); } __global__ void test_nhwc_inverse( int *output, int n_size, int h_size, int w_size, int c_size) { int ldc = c_size; int ldw = ldc * w_size; int ldh = ldw * h_size; typedef cutlass::layout::TensorNHWC Tensor; Tensor::Stride tensor_stride({ ldc, ldw, ldh }); Tensor tensor_nhw_packed_c(tensor_stride); for (int n_idx = 0; n_idx < n_size; n_idx++) { for (int p_idx = 0; p_idx < h_size; p_idx++) { for (int q_idx = 0; q_idx < w_size; q_idx++) { cutlass::Tensor4DCoord tensor_coord(n_idx, p_idx, q_idx, threadIdx.x); int ptr_offset = tensor_nhw_packed_c(tensor_coord); cutlass::Tensor4DCoord inv_coord = tensor_nhw_packed_c.inverse(ptr_offset); output[ptr_offset] = tensor_nhw_packed_c(inv_coord); } } } } class TestTensorNHWC { public: // // Data members // // // Methods // /// Ctor TestTensorNHWC() { } /// Runs the test void run(int n_size, int h_size, int w_size, int c_size) { size_t size = n_size * h_size * w_size * c_size; /// Device memory containing output cutlass::device_memory::allocation< int > output(size); int *output_host = (int *)malloc(sizeof(int) * size); dim3 grid(1,1); dim3 block(c_size, 1, 1); test::layout::test_nhwc_inverse<<< grid, block >>>(output.get(), n_size, h_size, w_size, c_size); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); // // Verify output // cutlass::device_memory::copy_to_host(output_host, output.get(), size); result = cudaGetLastError(); ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); for (int n_idx = 0; n_idx < n_size; n_idx++) { for (int p_idx = 0; p_idx < h_size; p_idx++) { for (int q_idx = 0; q_idx < w_size; q_idx++) { for (int c_idx = 0; c_idx < c_size; c_idx++) { int reference_offset = c_idx + q_idx * c_size + p_idx * (c_size * w_size) + n_idx * (c_size * w_size * h_size); EXPECT_EQ(output_host[reference_offset], reference_offset); } } } } } }; } // namespace layout } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Layout_TensorNHWC, NHWC_1_16_8_32) { int n_size = 1; int h_size = 16; int w_size = 8; int c_size = 32; 
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size); test::layout::TestTensorNHWC test_nhwc; test_nhwc.run(n_size, h_size, w_size, c_size); } TEST(Layout_TensorNHWC, NHWC_2_16_8_32) { int n_size = 2; int h_size = 16; int w_size = 8; int c_size = 32; test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size); test::layout::TestTensorNHWC test_nhwc; test_nhwc.run(n_size, h_size, w_size, c_size); } TEST(Layout_TensorNHWC, NHWC_2_16_8_128) { int n_size = 2; int h_size = 16; int w_size = 8; int c_size = 128; test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size); test::layout::TestTensorNHWC test_nhwc; test_nhwc.run(n_size, h_size, w_size, c_size); } TEST(Layout_TensorNHWC, NHWC_4_8_16_128) { int n_size = 4; int h_size = 8; int w_size = 16; int c_size = 128; test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size); test::layout::TestTensorNHWC test_nhwc; test_nhwc.run(n_size, h_size, w_size, c_size); } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/layout/tensor_nhwc.cu/0
{ "file_path": "cutlass/test/unit/layout/tensor_nhwc.cu", "repo_id": "cutlass", "token_count": 2912 }
55
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit test for the PipelineTmaAsync class as it would be used in a Warp specialized loop */ #define KERNEL_DBG_TRACE false #include "../common/cutlass_unit_test.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include <cute/arch/cluster_sm90.hpp> #include <cutlass/util/reference/host/gemm.h> #include <cutlass/cluster_launch.hpp> #include "cutlass/core_io.h" #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "testbed.h" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/arch/barrier.h" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/barrier.h" #include "cutlass/arch/reg_reconfig.h" using namespace cute; using namespace cutlass; //////////////////// KERNEL ///////////////////////// template <uint32_t Stages> struct SharedStorage { typename cutlass::PipelineTmaAsync<Stages>::SharedStorage storage ; }; struct KernelParams { uint32_t num_iterations; int* data_ptr; }; // Goal of this kernel is to complete deadlock-free template <typename ClusterShape, uint32_t Stages> __launch_bounds__(384, 1) __global__ static void pipeline_device(KernelParams const kernel_params) { extern __shared__ char shared_memory[]; using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages>; using PipelineState = typename cutlass::PipelineState<Stages>; using SharedStorage = SharedStorage<Stages>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); [[maybe_unused]] auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / 128, 0); int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0); int warp_group_thread_idx = threadIdx.x % 128; dim3 block_id_in_cluster = cute::block_id_in_cluster(); auto cluster_shape = ClusterShape{}; // #Producers = #RowsInCluster + #ColsInCluster - 1 uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; uint32_t const TmaTransactionBytes = static_cast<uint32_t>(sizeof(uint32_t) * NumProducers); uint32_t const per_cta_bytes = sizeof(uint32_t); // mbarrier.init typename MainloopPipeline::Params params; params.transaction_bytes = TmaTransactionBytes; if (warp_group_idx == 0) { params.role = MainloopPipeline::ThreadCategory::Producer; } else { params.role = MainloopPipeline::ThreadCategory::Consumer; } params.is_leader = warp_group_thread_idx == 0; params.num_consumers = 128; MainloopPipeline pipeline(shared_storage.storage, params, cluster_shape); __syncthreads(); // Ensure All CTAs in Cluster have completed init before issuing commits cute::cluster_arrive_relaxed(); cute::cluster_wait(); // Producer WarpGroup if (warp_group_idx == 0) { cutlass::arch::warpgroup_reg_alloc<232>(); int lane_predicate = cute::elect_one_sync(); if (warp_idx_in_warpgroup == 0 && lane_predicate) { int tma_k_prologue = min(Stages, kernel_params.num_iterations); // Simulating Prologue TMA Loads // For the DMA (prologue) - we start with an opposite phase - since we skip all waits // i.e., we know that the buffer is indeed empty PipelineState smem_pipe_write = make_producer_start_state<MainloopPipeline>(); CUTLASS_PRAGMA_UNROLL for(int i = 0; i < tma_k_prologue; ++i) { pipeline.producer_acquire(smem_pipe_write); // Simulating cp.async.bulk.tensor behavior pipeline.producer_commit(smem_pipe_write, per_cta_bytes); ++smem_pipe_write; } int tma_k_iter = kernel_params.num_iterations - tma_k_prologue; // Simulating Mainloop TMA Loads CUTE_NO_UNROLL for ( ; 
tma_k_iter > 0; --tma_k_iter) { pipeline.producer_acquire(smem_pipe_write); // Simulating cp.async.bulk.tensor behavior pipeline.producer_commit(smem_pipe_write, per_cta_bytes); // Advance write stage ++smem_pipe_write; } // Tail Loop // Handles the case where we never enter the mainloop PipelineState tail = tma_k_prologue == Stages ? smem_pipe_write : PipelineState{}; for ( int i = 0; i < tma_k_prologue; ++i) { pipeline.producer_acquire(tail); ++tail; } } // Consumer WarpGroup } else if(warp_group_idx == 1) { cutlass::arch::warpgroup_reg_alloc<232>(); PipelineState smem_pipe_read; PipelineState smem_pipe_release; // simulates accumulators + extra reg. pressure int arr[168]; // Init Shared Memory read stages & PhaseBit static constexpr uint32_t K_PIPE_MMAS = 1; static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight"); // Total number of gemm iterations auto gemm_k_iterations = kernel_params.num_iterations; // Simulating Prologue MMAs int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations); CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < mma_k_prologue; ++iter) { pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // GMMA would typically happen here ++smem_pipe_read; } gemm_k_iterations -= mma_k_prologue; // Simulating Mainloop MMAs CUTLASS_PRAGMA_NO_UNROLL for ( ; gemm_k_iterations > 0; --gemm_k_iterations) { /// Wait on the smem_pipe_read stage / phase pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // GMMA would typically happen here // Dummy op - which will never happen // But simulates high register usage. CUTE_UNROLL for(int i = 0; i < 168; ++i){ if (threadIdx.x > 256){ arr[i] += kernel_params.data_ptr[i]; } } pipeline.consumer_release(smem_pipe_release); // Advance stages ++smem_pipe_read; ++smem_pipe_release; } // Dummy op - which will never happen CUTE_UNROLL for(int i = 0; i < 168; ++i){ if (threadIdx.x > 256){ kernel_params.data_ptr[i] = arr[i]; } } // Tail Loop for (int i = 0; i < K_PIPE_MMAS; ++i){ pipeline.consumer_release(smem_pipe_release); ++smem_pipe_release; } // Warp-Group #2 } else { cutlass::arch::warpgroup_reg_dealloc<40>(); } } ///////////////////////////////////////////////////// /// Device NT GMMA + TMA specialized template<uint32_t Stages_, typename ClusterShape_> struct PipelineTest { // // Data members // static constexpr uint32_t Stages = Stages_; static constexpr uint32_t kBlockSize = 128 * 3; using ClusterShape = ClusterShape_; // // Methods // // Ctor PipelineTest(){}; // Run CuTe GEMM kernel cudaError_t run(uint32_t const kNumIters, cudaStream_t stream = 0) { float elapsed_ms = 0.0f; // Pipeline (multistage pipeline) [[maybe_unused]] auto num_stages = Int<Stages>{}; auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{}; // // Configure and launch // int iterations = 1; cudaEvent_t events[2]; cudaError_t result; for (cudaEvent_t & event : events) { result = cudaEventCreate(&event); if (result != cudaSuccess) { std::cerr << "Error: Failed to create event."; return result; } } result = cudaEventRecord(events[0]); if (result != cudaSuccess) { std::cerr << "Error: Failed to record start event."; return result; } for (int iter = 0; iter < iterations; ++iter) { using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages>; int smem_size = int(sizeof(SharedStorage<Stages>)); result = cudaFuncSetAttribute( pipeline_device<decltype(cluster_shape), Stages>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); // Launch a single Cluster, with kBlockSize threads per CTA dim3 
dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimBlock(kBlockSize,1,1); const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>; KernelParams params{kNumIters, nullptr}; void* kernel_params[] = {reinterpret_cast<void*>(&params)}; cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); } result = cudaEventRecord(events[1]); if (result != cudaSuccess) { std::cerr << "Error: Failed to record stop event."; return result; } result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; return result; } result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]); if (result != cudaSuccess) { std::cerr << "Failed to create event."; return result; } for (cudaEvent_t & event : events) { (void)cudaEventDestroy(event); } return cudaSuccess; } }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, 
ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } #endif
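/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not part of the unit test above. The kernel relies on the stage-index /
// phase-bit bookkeeping of cutlass::PipelineState, and the producer deliberately starts with the
// opposite phase (make_producer_start_state) because every buffer is known to be empty at the
// start. The host-only snippet below mimics that bookkeeping with a toy type so the wrap-around
// behavior is easy to trace; ToyPipelineState and main() are assumptions made for illustration
// and do not exist in CUTLASS. It is wrapped in #if 0 so it does not affect this translation unit.
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
#include <cstdint>
#include <cstdio>

template <uint32_t Stages>
struct ToyPipelineState {
  uint32_t index = 0;   // which stage of the circular buffer we point at
  uint32_t phase = 0;   // flips every time the index wraps around the ring

  ToyPipelineState& operator++() {
    if (++index == Stages) {
      index = 0;
      phase ^= 1;
    }
    return *this;
  }
};

int main() {
  constexpr uint32_t Stages = 3;

  ToyPipelineState<Stages> producer;
  producer.phase = 1;                // producer starts with the inverted phase: all buffers are
                                     // empty, so its prologue acquires must not block

  ToyPipelineState<Stages> consumer; // consumer starts at phase 0 and only proceeds once the
                                     // producer has committed a stage carrying that phase

  for (int i = 0; i < 7; ++i) {
    std::printf("step %d: producer(stage=%u, phase=%u)  consumer(stage=%u, phase=%u)\n",
                i, producer.index, producer.phase, consumer.index, consumer.phase);
    ++producer;
    ++consumer;
  }
  return 0;
}
#endif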
cutlass/test/unit/pipeline/pipeline_tma_async_warp_specialized.cu/0
{ "file_path": "cutlass/test/unit/pipeline/pipeline_tma_async_warp_specialized.cu", "repo_id": "cutlass", "token_count": 6453 }
56
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines a data structure in which a set of functionally equivalent library::Operation instances may be queried. 
*/ #pragma once #include <fstream> #include <iosfwd> #include <unordered_map> #include <algorithm> #include "cutlass/library/library.h" #include "cutlass/library/manifest.h" #include "cutlass/library/util.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { ///////////////////////////////////////////////////////////////////////////////////////////////// // Data Structures for Gemm Functional Maps ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tuple uniquely identifying Gemm functional behavior struct GemmFunctionalKey { Provider provider; GemmKind gemm_kind; NumericTypeID element_compute; NumericTypeID element_scalar; NumericTypeID element_A; LayoutTypeID layout_A; ComplexTransform transform_A; NumericTypeID element_B; LayoutTypeID layout_B; ComplexTransform transform_B; NumericTypeID element_C; LayoutTypeID layout_C; NumericTypeID element_D; LayoutTypeID layout_D; // // Methods // inline GemmFunctionalKey( Provider provider, GemmKind gemm_kind = GemmKind::kGemm, NumericTypeID element_compute = NumericTypeID::kF32, NumericTypeID element_scalar = NumericTypeID::kF32, NumericTypeID element_A = NumericTypeID::kF16, LayoutTypeID layout_A = LayoutTypeID::kColumnMajor, ComplexTransform transform_A = ComplexTransform::kNone, NumericTypeID element_B = NumericTypeID::kF16, LayoutTypeID layout_B = LayoutTypeID::kColumnMajor, ComplexTransform transform_B = ComplexTransform::kNone, NumericTypeID element_C = NumericTypeID::kF16, LayoutTypeID layout_C = LayoutTypeID::kColumnMajor, NumericTypeID element_D = NumericTypeID::kF16, LayoutTypeID layout_D = LayoutTypeID::kColumnMajor ): provider(provider), gemm_kind(gemm_kind), element_compute(element_compute), element_scalar(element_scalar), element_A(element_A), layout_A(layout_A), transform_A(transform_A), element_B(element_B), layout_B(layout_B), transform_B(transform_B), element_C(element_C), layout_C(layout_C), element_D(element_D), layout_D(layout_D) { } inline bool operator==(GemmFunctionalKey const &rhs) const { return (provider == rhs.provider) && (gemm_kind == rhs.gemm_kind) && (element_compute == rhs.element_compute) && (element_scalar == rhs.element_scalar) && (element_A == rhs.element_A) && (layout_A == rhs.layout_A) && (transform_A == rhs.transform_A) && (element_B == rhs.element_B) && (layout_B == rhs.layout_B) && (transform_B == rhs.transform_B) && (element_C == rhs.element_C) && (layout_C == rhs.layout_C) && (element_D == rhs.element_D) && (layout_D == rhs.layout_D); } inline bool operator!=(GemmFunctionalKey const &rhs) const { return !(*this == rhs); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// inline std::ostream & operator<<(std::ostream &out, cutlass::library::GemmFunctionalKey const &k) { out << "{\n" << " provider: " << to_string(k.provider) << "\n" << " gemm_kind: " << to_string(k.gemm_kind) << "\n" << " element_compute: " << to_string(k.element_compute) << "\n" << " element_scalar: " << to_string(k.element_scalar) << "\n" << " element_A: " << to_string(k.element_A) << "\n" << " layout_A: " << to_string(k.layout_A) << "\n" << " transform_A: " << to_string(k.transform_A) << "\n" << " element_B: " << to_string(k.element_B) << "\n" << " layout_B: " << to_string(k.layout_B) << "\n" << " transform_B: " << to_string(k.transform_B) << "\n" << " element_C: " << to_string(k.element_C) << "\n" << " layout_C: " << to_string(k.layout_C) << "\n" << " 
element_D: " << to_string(k.element_D) << "\n" << " layout_D: " << to_string(k.layout_D) << "\n" << "}"; return out; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Hash function for GemmFunctionalKey struct GemmFunctionalKeyHasher { using IntHash = std::hash<int>; inline static size_t rotl(size_t key, int shl) { return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl))); } inline size_t operator()(GemmFunctionalKey const &key) const { IntHash hash; return rotl(hash(int(key.provider)), 1) ^ rotl(hash(int(key.gemm_kind)), 2) ^ rotl(hash(int(key.element_compute)), 3) ^ rotl(hash(int(key.element_scalar)), 4) ^ rotl(hash(int(key.element_A)), 5) ^ rotl(hash(int(key.layout_A)), 6) ^ rotl(hash(int(key.transform_A)), 7) ^ rotl(hash(int(key.element_B)), 8) ^ rotl(hash(int(key.layout_B)), 9) ^ rotl(hash(int(key.transform_B)), 10) ^ rotl(hash(int(key.element_C)), 11) ^ rotl(hash(int(key.layout_C)), 12) ^ rotl(hash(int(key.element_D)), 13) ^ rotl(hash(int(key.layout_D)), 14); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Establishes a partial ordering to search for GEMM operators struct GemmPreferenceKey { int compute_capability; int alignment; // // Methods // GemmPreferenceKey(): compute_capability(), alignment() { } GemmPreferenceKey(int cc, int alignment): compute_capability(cc), alignment(alignment) { } bool operator<(GemmPreferenceKey const &rhs) const { return (compute_capability < rhs.compute_capability) || ((compute_capability == rhs.compute_capability) && (alignment < rhs.alignment)); } bool operator==(GemmPreferenceKey const &rhs) const { return compute_capability == rhs.compute_capability; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// inline std::ostream& operator<< (std::ostream& out, const cutlass::library::GemmPreferenceKey& key) { out << "{\n" << "compute_capability : " << key.compute_capability << std::endl << "alignment : " << key.alignment << std::endl << "}"; return out; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Maps minimum compute capability onto a vector of possible operations using GemmOperationVectorMap = std::map< GemmPreferenceKey, std::vector<Operation const *> >; /// Maps a GemmFunctionalKey onto a vector of Operation * objects expected to be of kind kGemm using GemmOperationFunctionalMap = std::unordered_map< GemmFunctionalKey, GemmOperationVectorMap, GemmFunctionalKeyHasher >; ///////////////////////////////////////////////////////////////////////////////////////////////// // Data Structures for Conv Functional Maps ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tuple uniquely identifying conv2d functional behavior struct ConvFunctionalKey { library::Provider provider; library::ConvKind conv_kind; library::NumericTypeID element_A; library::LayoutTypeID layout_A; library::NumericTypeID element_B; library::LayoutTypeID layout_B; library::NumericTypeID element_C; library::LayoutTypeID layout_C; library::NumericTypeID element_accumulator; library::NumericTypeID element_compute; // // Methods // inline ConvFunctionalKey( library::Provider provider = library::Provider::kInvalid, library::ConvKind conv_kind = library::ConvKind::kFprop, library::NumericTypeID element_A = library::NumericTypeID::kF16, library::LayoutTypeID layout_A = library::LayoutTypeID::kTensorNHWC, 
library::NumericTypeID element_B = library::NumericTypeID::kF16, library::LayoutTypeID layout_B = library::LayoutTypeID::kTensorNHWC, library::NumericTypeID element_C = library::NumericTypeID::kF16, library::LayoutTypeID layout_C = library::LayoutTypeID::kTensorNHWC, library::NumericTypeID element_accumulator = library::NumericTypeID::kF32, library::NumericTypeID element_compute = library::NumericTypeID::kF32 ): provider(provider), conv_kind(conv_kind), element_A(element_A), layout_A(layout_A), element_B(element_B), layout_B(layout_B), element_C(element_C), layout_C(layout_C), element_accumulator(element_accumulator), element_compute(element_compute) { } inline bool operator==(ConvFunctionalKey const &rhs) const { return (provider == rhs.provider) && (conv_kind == rhs.conv_kind) && (element_A == rhs.element_A) && (layout_A == rhs.layout_A) && (element_B == rhs.element_B) && (layout_B == rhs.layout_B) && (element_C == rhs.element_C) && (layout_C == rhs.layout_C) && (element_accumulator == rhs.element_accumulator) && (element_compute == rhs.element_compute); } inline bool operator!=(ConvFunctionalKey const &rhs) const { return !(*this == rhs); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// inline std::ostream& operator<< (std::ostream& out, const cutlass::library::ConvFunctionalKey& key) { out << "{\n" << "provider: " << to_string(key.provider) << std::endl << "conv_kind: " << to_string(key.conv_kind) << std::endl << "element_A: " << to_string(key.element_A) << std::endl << "layout_A: " << to_string(key.layout_A) << std::endl << "element_B: " << to_string(key.element_B) << std::endl << "layout_B: " << to_string(key.layout_B) << std::endl << "element_C: " << to_string(key.element_C) << std::endl << "layout_C: " << to_string(key.layout_C) << std::endl << "element_accumulator: " << to_string(key.element_accumulator) << std::endl << "element_compute: " << to_string(key.element_compute) << std::endl << "}"; return out; } ///////////////////////////////////////////////////////////////////////////////////////////////// struct ConvFunctionalKeyHasher { using IntHash = std::hash<int>; inline static size_t rotl(size_t key, int shl) { return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl))); } inline size_t operator()(ConvFunctionalKey const &key) const { IntHash hash; return rotl(hash(int(key.provider)), 1) ^ rotl(hash(int(key.conv_kind)), 2) ^ rotl(hash(int(key.element_A)), 3) ^ rotl(hash(int(key.layout_A)), 4) ^ rotl(hash(int(key.element_B)), 5) ^ rotl(hash(int(key.layout_B)), 6) ^ rotl(hash(int(key.element_C)), 7) ^ rotl(hash(int(key.layout_C)), 8) ^ rotl(hash(int(key.element_accumulator)), 9) ^ rotl(hash(int(key.element_compute)), 10); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Establishes a partial ordering to search for Conv2d operators struct ConvPreferenceKey { int compute_capability; IteratorAlgorithmID iterator_algorithm; // // Methods // ConvPreferenceKey(): compute_capability(), iterator_algorithm() { } ConvPreferenceKey(int cc, IteratorAlgorithmID iterator_algorithm): compute_capability(cc), iterator_algorithm(iterator_algorithm) { } bool operator<(ConvPreferenceKey const &rhs) const { return (compute_capability < rhs.compute_capability) || ((compute_capability == rhs.compute_capability) && (iterator_algorithm < rhs.iterator_algorithm)); } bool operator==(ConvPreferenceKey const &rhs) const { return (compute_capability == rhs.compute_capability) && 
(iterator_algorithm == rhs.iterator_algorithm); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Maps minimum compute capability onto a vector of possible operations using ConvOperationVectorMap = std::map< ConvPreferenceKey, std::vector<Operation const *> >; /// Maps a GemmFunctionalKey onto a vector of Operation * objects expected to be of kind kGemm using ConvOperationFunctionalMap = std::unordered_map< ConvFunctionalKey, ConvOperationVectorMap, ConvFunctionalKeyHasher >; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tuple uniquely identifying conv2d functional behavior struct ReductionFunctionalKey { library::Provider provider; library::NumericTypeID element_workspace; library::NumericTypeID element_accumulator; library::NumericTypeID element_output; library::NumericTypeID element_compute; library::MathOperationID reduce_math_op; library::EpilogueKind epilogue_math_op; // // Methods // inline ReductionFunctionalKey( library::Provider provider = library::Provider::kInvalid, library::NumericTypeID element_workspace = library::NumericTypeID::kF16, library::NumericTypeID element_accumulator = library::NumericTypeID::kF32, library::NumericTypeID element_output = library::NumericTypeID::kF16, library::NumericTypeID element_compute = library::NumericTypeID::kF32, library::MathOperationID reduce_math_op = library::MathOperationID::kAdd, library::EpilogueKind epilogue_math_op = library::EpilogueKind::kLinearCombination ): provider(provider), element_workspace(element_workspace), element_accumulator(element_accumulator), element_output(element_output), element_compute(element_compute), reduce_math_op(reduce_math_op), epilogue_math_op(epilogue_math_op) { } inline bool operator==(ReductionFunctionalKey const &rhs) const { return (provider == rhs.provider) && (element_workspace == rhs.element_workspace) && (element_accumulator == rhs.element_accumulator) && (element_output == rhs.element_output) && (element_compute == rhs.element_compute) && (reduce_math_op == rhs.reduce_math_op) && (epilogue_math_op == rhs.epilogue_math_op); } inline bool operator!=(ReductionFunctionalKey const &rhs) const { return !(*this == rhs); } }; struct ReductionFunctionalKeyHasher { using IntHash = std::hash<int>; inline static size_t rotl(size_t key, int shl) { return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl))); } inline size_t operator()(ReductionFunctionalKey const &key) const { IntHash hash; return rotl(hash(int(key.provider)), 1) ^ rotl(hash(int(key.element_workspace)), 2) ^ rotl(hash(int(key.element_accumulator)), 3) ^ rotl(hash(int(key.element_output)), 4) ^ rotl(hash(int(key.element_compute)), 5) ^ rotl(hash(int(key.reduce_math_op)), 6) ^ rotl(hash(int(key.epilogue_math_op)), 7); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// inline std::ostream& operator<< (std::ostream& out, const ReductionFunctionalKey& key) { out << "{\n" << "provider: " << library::to_string(key.provider) << std::endl << "element_workspace : " << library::to_string(key.element_workspace) << std::endl << "element_accumulator : " << library::to_string(key.element_accumulator) << std::endl << "element_output : " << library::to_string(key.element_output) << std::endl << "element_compute : " << library::to_string(key.element_compute) << std::endl << "}"; return out; } 
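/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not part of the library. The *_KeyHasher structs above combine the
// per-field hashes by rotating each one by a distinct amount before XOR-ing them together.
// Without the rotation, two keys that merely swap the values of two enum fields would collide.
// The standalone snippet below demonstrates the idea with plain integers; toy_rotl, toy_combine
// and main() are hypothetical names used only for this example, and the block is kept under
// #if 0 so it does not affect the header.
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
#include <cstddef>
#include <cstdio>
#include <functional>

static size_t toy_rotl(size_t key, int shl) {
  return (key << shl) | (key >> (sizeof(key) * 8u - static_cast<size_t>(shl)));
}

// Combine two integer "fields" the same way GemmFunctionalKeyHasher combines enum fields.
static size_t toy_combine(int a, int b) {
  std::hash<int> hash;
  return toy_rotl(hash(a), 1) ^ toy_rotl(hash(b), 2);
}

int main() {
  // With the rotation, (2, 3) and (3, 2) hash differently; a plain XOR of the unrotated hashes
  // would make them collide because XOR is commutative.
  std::printf("combine(2, 3) = %zu\n", toy_combine(2, 3));
  std::printf("combine(3, 2) = %zu\n", toy_combine(3, 2));
  return 0;
}
#endif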
///////////////////////////////////////////////////////////////////////////////////////////////// // ReductionOperationFunctionalMap has NO preference key and a single instance per functional key // i.e. only one tile size configuration per functional key using ReductionOperationFunctionalMap = std::unordered_map< ReductionFunctionalKey, library::Operation const *, ReductionFunctionalKeyHasher >; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Table of cutlass::library::Operation instances class OperationTable { public: /// Map of all operations of type kGemm // provider (kCUTLASS) GemmOperationFunctionalMap gemm_operations; /// Map of all operations of type kConv2d // provider (kCUTLASS, kReferenceHost, kReferenceDevice) ConvOperationFunctionalMap conv2d_operations; /// Map of all operations of type kConv3d // provider (kCUTLASS, kReferenceHost, kReferenceDevice) ConvOperationFunctionalMap conv3d_operations; /// Map of all operations of type kReduction // provider (kCUTLASS) ReductionOperationFunctionalMap reduction_operations; public: void append(Manifest const &manifest); }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////// std::ostream & operator<<(std::ostream &out, cutlass::library::GemmFunctionalKey const &k);
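/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch appended for exposition only; this is a possible lookup flow, not the
// profiler's actual helper. A client builds a GemmFunctionalKey describing the data types and
// layouts it needs, looks it up in OperationTable::gemm_operations, and then walks the inner
// GemmOperationVectorMap collecting candidates whose GemmPreferenceKey (compute capability,
// alignment) the device can satisfy. `device_cc`, `alignment` and `candidate_gemms` are
// hypothetical names; the block is disabled with #if 0.
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
#include <vector>

std::vector<cutlass::library::Operation const *> candidate_gemms(
    cutlass::library::OperationTable const &table, int device_cc, int alignment) {

  using namespace cutlass::library;

  GemmFunctionalKey key(
    Provider::kCUTLASS,
    GemmKind::kGemm,
    NumericTypeID::kF32,          // element_compute
    NumericTypeID::kF32,          // element_scalar
    NumericTypeID::kF16,          // element_A
    LayoutTypeID::kColumnMajor,   // layout_A
    ComplexTransform::kNone,
    NumericTypeID::kF16,          // element_B
    LayoutTypeID::kColumnMajor,   // layout_B
    ComplexTransform::kNone,
    NumericTypeID::kF16,          // element_C
    LayoutTypeID::kColumnMajor,   // layout_C
    NumericTypeID::kF16,          // element_D
    LayoutTypeID::kColumnMajor);  // layout_D

  std::vector<Operation const *> result;

  auto functional_it = table.gemm_operations.find(key);
  if (functional_it == table.gemm_operations.end()) {
    return result;  // no operation matches this functional description
  }

  // GemmPreferenceKey orders entries by (compute_capability, alignment); keep every vector of
  // operations this device can run with the requested alignment.
  for (auto const &kv : functional_it->second) {
    if (kv.first.compute_capability <= device_cc && kv.first.alignment <= alignment) {
      result.insert(result.end(), kv.second.begin(), kv.second.end());
    }
  }
  return result;
}
#endif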
cutlass/tools/library/include/cutlass/library/operation_table.h/0
{ "file_path": "cutlass/tools/library/include/cutlass/library/operation_table.h", "repo_id": "cutlass", "token_count": 6529 }
57
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines operations for reduction operation in CUTLASS Library. 
*/ #include "cutlass/cutlass.h" #include "cutlass/library/library.h" #include "cutlass/library/manifest.h" #include "reduction_operation.h" namespace cutlass { namespace library { // naming convention initialize_reduce_[ReductionOp]_[EpilogueOp]_[ElementWorkspace]_[ElementAccumulator]_[ElementOutput] void initialize_reduce_add_linear_combination_f16_f16_f16(Manifest &manifest) { using ElementWorkspace = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementCompute = cutlass::half_t; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementWorkspace>::value, ElementAccumulator, ElementCompute >; using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using Operation_reduce_add_linear_combination_f16_f16_f16 = cutlass::reduction::device::ReduceSplitK< cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp > >; manifest.append(new ReductionOperation< Operation_reduce_add_linear_combination_f16_f16_f16>( "reduce_add_linear_combination_f16_f16_f16" )); } void initialize_reduce_add_linear_combination_f32_f32_f16(Manifest &manifest) { using ElementWorkspace = float; using ElementAccumulator = float; using ElementOutput = cutlass::half_t; using ElementCompute = float; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementWorkspace>::value, ElementAccumulator, ElementCompute >; using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using Operation_reduce_add_linear_combination_f32_f32_f16 = cutlass::reduction::device::ReduceSplitK< cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp > >; manifest.append(new ReductionOperation< Operation_reduce_add_linear_combination_f32_f32_f16>( "reduce_add_linear_combination_f32_f32_f16" )); } void initialize_reduce_add_linear_combination_f32_f32_bf16(Manifest &manifest) { using ElementWorkspace = float; using ElementAccumulator = float; using ElementOutput = cutlass::bfloat16_t; using ElementCompute = float; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementWorkspace>::value, ElementAccumulator, ElementCompute >; using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using Operation_reduce_add_linear_combination_f32_f32_bf16 = cutlass::reduction::device::ReduceSplitK< cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp > >; manifest.append(new ReductionOperation< Operation_reduce_add_linear_combination_f32_f32_bf16>( "reduce_add_linear_combination_f32_f32_bf16" )); } void initialize_reduce_add_linear_combination_f32_f32_f32(Manifest &manifest) { using ElementWorkspace = float; using ElementAccumulator = float; using ElementOutput = float; using ElementCompute = float; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementWorkspace>::value, ElementAccumulator, ElementCompute >; using ReductionOp = 
cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using Operation_reduce_add_linear_combination_f32_f32_f32 = cutlass::reduction::device::ReduceSplitK< cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp > >; manifest.append(new ReductionOperation< Operation_reduce_add_linear_combination_f32_f32_f32>( "reduce_add_linear_combination_f32_f32_f32" )); } void initialize_reduce_add_linear_combination_f64_f64_f64(Manifest &manifest) { using ElementWorkspace = double; using ElementAccumulator = double; using ElementOutput = double; using ElementCompute = double; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementWorkspace>::value, ElementAccumulator, ElementCompute >; using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using Operation_reduce_add_linear_combination_f64_f64_f64 = cutlass::reduction::device::ReduceSplitK< cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp > >; manifest.append(new ReductionOperation< Operation_reduce_add_linear_combination_f64_f64_f64>( "reduce_add_linear_combination_f64_f64_f64" )); } void initialize_reduce_add_linear_combination_cf32_cf32_cf32(Manifest &manifest) { using ElementWorkspace = cutlass::complex<float>; using ElementAccumulator = cutlass::complex<float>; using ElementOutput = cutlass::complex<float>; using ElementCompute = cutlass::complex<float>; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementWorkspace>::value, ElementAccumulator, ElementCompute >; using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using Operation_reduce_add_linear_combination_cf32_cf32_cf32 = cutlass::reduction::device::ReduceSplitK< cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp > >; manifest.append(new ReductionOperation< Operation_reduce_add_linear_combination_cf32_cf32_cf32>( "reduce_add_linear_combination_cf32_cf32_cf32" )); } } }
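/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch: the instantiation below is hypothetical and is not claimed to exist in
// the CUTLASS library. It shows that adding another split-K reduction follows the same recipe as
// the functions above: pick the workspace/accumulator/output/compute types, build the
// LinearCombination epilogue and ReduceAdd functor, wrap them in ReduceSplitK, and append the
// operation to the manifest under the documented naming convention. Disabled with #if 0.
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
namespace cutlass {
namespace library {

void initialize_reduce_add_linear_combination_f16_f32_f16(Manifest &manifest) {

  using ElementWorkspace   = cutlass::half_t;
  using ElementAccumulator = float;
  using ElementOutput      = cutlass::half_t;
  using ElementCompute     = float;

  using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
    ElementOutput,
    128 / cutlass::sizeof_bits<ElementWorkspace>::value,
    ElementAccumulator,
    ElementCompute
  >;

  using ReductionOp = cutlass::reduction::thread::ReduceAdd<
    ElementAccumulator,
    typename EpilogueOutputOp::ElementAccumulator,
    EpilogueOutputOp::kCount
  >;

  using Operation_reduce_add_linear_combination_f16_f32_f16 =
    cutlass::reduction::device::ReduceSplitK<
      cutlass::reduction::kernel::ReduceSplitK<
        cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
        EpilogueOutputOp,
        ReductionOp
      >
    >;

  manifest.append(new ReductionOperation<
    Operation_reduce_add_linear_combination_f16_f32_f16>(
      "reduce_add_linear_combination_f16_f32_f16"
  ));
}

} // namespace library
} // namespace cutlass
#endif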
cutlass/tools/library/src/reduction/reduction_device.cu/0
{ "file_path": "cutlass/tools/library/src/reduction/reduction_device.cu", "repo_id": "cutlass", "token_count": 2880 }
58
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #pragma once #include <stdexcept> #include <list> #include <vector> #include "cutlass/library/library.h" #include "cutlass/util/distribution.h" #include "enumerated_types.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Device memory allocation class DeviceAllocation { private: /// Data type of contained elements library::NumericTypeID type_; /// Gets the stride between elements size_t batch_stride_; /// Capacity in elements of device allocation size_t capacity_; /// Pointer to device memory void *pointer_; /// Layout type ID library::LayoutTypeID layout_; /// Stride vector std::vector<int64_t> stride_; /// Extent vector std::vector<int> extent_; /// Support allocating a 'batch' of non-overlapping tensors in contiguous memory int batch_count_; /// Buffer holding TensorRef instance to recently allocated memory std::vector<uint8_t> tensor_ref_buffer_; public: // // Static member functions // /// Determines the number of bytes needed to represent this numeric type static size_t bytes(library::NumericTypeID type, size_t capacity); /// Returns the stride of a packed layout static std::vector<int64_t> get_packed_layout( library::LayoutTypeID layout_id, std::vector<int> const &extent); /// returns the capacity needed static size_t construct_layout( void *bytes, library::LayoutTypeID layout_id, std::vector<int> const &extent, std::vector<int64_t> &stride); /// Returns true if two blocks have exactly the same value static bool block_compare_equal( library::NumericTypeID numeric_type, void const *ptr_A, 
void const *ptr_B, size_t capacity); /// Returns true if two blocks have approximately the same value static bool block_compare_relatively_equal( library::NumericTypeID numeric_type, void const *ptr_A, void const *ptr_B, size_t capacity, double epsilon, double nonzero_floor); public: // // Methods // DeviceAllocation(); DeviceAllocation(library::NumericTypeID type, size_t capacity); DeviceAllocation( library::NumericTypeID type, library::LayoutTypeID layout_id, std::vector<int> const &extent, std::vector<int64_t> const &stride = std::vector<int64_t>(), int batch_count = 1); ~DeviceAllocation(); DeviceAllocation &reset(); /// Allocates device memory of a given type and capacity DeviceAllocation &reset(library::NumericTypeID type, size_t capacity); /// Allocates memory for a given layout and tensor DeviceAllocation &reset( library::NumericTypeID type, library::LayoutTypeID layout_id, std::vector<int> const &extent, std::vector<int64_t> const &stride = std::vector<int64_t>(), int batch_count = 1); /// Returns a buffer owning the tensor reference std::vector<uint8_t> &tensor_ref() { return tensor_ref_buffer_; } bool good() const; /// Data type of contained elements library::NumericTypeID type() const; /// Pointer to start of device memory allocation void *data() const; /// Pointer to the first element of a batch void *batch_data(int batch_idx) const; /// Gets the layout type library::LayoutTypeID layout() const; /// Gets the stride vector std::vector<int64_t> const & stride() const; /// Gets the extent vector std::vector<int> const & extent() const; /// Gets the number of adjacent tensors in memory int batch_count() const; /// Gets the stride (in units of elements) between items int64_t batch_stride() const; /// Gets the stride (in units of bytes) between items int64_t batch_stride_bytes() const; /// Capacity of allocation in number of elements size_t capacity() const; /// Capacity of allocation in bytes size_t bytes() const; /// Initializes a device allocation to a random distribution using cuRAND void initialize_random_device(int seed, Distribution dist); /// Initializes a host allocation to a random distribution using std::cout void initialize_random_host(int seed, Distribution dist); /// Initializes a device allocation to a sequential distribution void initialize_sequential_device(Distribution dist); /// Initializes a host allocation to a sequential distribution void initialize_sequential_host(Distribution dist); /// Initializes a device allocation to a random distribution using cuRAND void initialize_random_sparsemeta_device(int seed, int MetaSizeInBits); /// Initializes a host allocation to a random distribution using std::cout void initialize_random_sparsemeta_host(int seed, int MetaSizeInBits); /// Uniformly fills a tensor with a value when provided o.w. zero void fill_device(double value); /// Uniformly fills a host allocation with a value when provided o.w. 
zero void fill_host(double value); /// Copies from an equivalent-sized tensor in device memory void copy_from_device(void const *ptr); /// Copies from an equivalent-sized tensor in device memory void copy_from_host(void const *ptr); /// Copies from an equivalent-sized tensor in device memory void copy_to_host(void *ptr); /// Writes a tensor to csv void write_tensor_csv(std::ostream &out); }; using DeviceAllocationList = std::list<DeviceAllocation>; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
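/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch of a typical usage flow, not code taken from the profiler itself. A
// DeviceAllocation is first reset() to a numeric type, layout and extent, then filled on the
// device, and finally copied back to a host buffer for inspection. The extents, the seed, and
// the Distribution::set_uniform() call below are assumptions made for this example; the block is
// kept under #if 0 so it does not affect the header.
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
#include <cstdint>
#include <vector>

void example_device_allocation() {
  using namespace cutlass;
  using namespace cutlass::profiler;

  // Allocate a 128x256 row-major FP16 tensor (single batch).
  DeviceAllocation allocation;
  allocation.reset(
    library::NumericTypeID::kF16,
    library::LayoutTypeID::kRowMajor,
    {128, 256});

  // Fill it with uniformly distributed random values on the device.
  Distribution dist;
  dist.set_uniform(-4, 4);
  allocation.initialize_random_device(/* seed = */ 2024, dist);

  // Copy the raw bytes back to the host for inspection.
  std::vector<uint8_t> host_bytes(allocation.bytes());
  allocation.copy_to_host(host_bytes.data());
}
#endif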
cutlass/tools/profiler/include/cutlass/profiler/device_allocation.h/0
{ "file_path": "cutlass/tools/profiler/include/cutlass/profiler/device_allocation.h", "repo_id": "cutlass", "token_count": 2109 }
59
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Convolution 2D profiling */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "cutlass/profiler/conv2d_operation_profiler.h" #include "cutlass/profiler/gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// using namespace cutlass::library; namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor Conv2dOperationProfiler::Conv2dOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kConv2d, { {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"}, {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"g", "groups"}, "Number of convolution groups"}, {ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H 
direction"}, {ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"}, {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"}, {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"}, {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"}, {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"}, {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"}, {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"}, {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"}, {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"}, {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"}, }, { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN } ) { description_ = " Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D)"; } /// Destructor Conv2dOperationProfiler::~Conv2dOperationProfiler() { } /// Prints usage statement for the math function void Conv2dOperationProfiler::print_usage(std::ostream &out) const { out << "Conv2d" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void Conv2dOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular convolution (specify all the convolution parameters):\n" << " $ cutlass_profiler --operation=Conv2d" " --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32" " --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3" " --pad_h=1 --pad_w=1" " --stride_h=1 --stride_w=1" " --dilation_h=1 --dilation_w=1\n\n"; } #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////// /// Total number of bytes loaded int64_t Conv2dOperationProfiler::Conv2dProblem::bytes( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); // Input bytes read and Output bytes written for the gemm problem int64_t bytes_ = int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); } 
return bytes_; } /// Total number of flops computed int64_t Conv2dOperationProfiler::Conv2dProblem::flops( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2; int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2; // Adjust mainloop flop for dgrad strided if (operation_desc.conv_kind == library::ConvKind::kDgrad) { flops_mainloop_ = flops_mainloop_ / (stride_h * stride_w); } int64_t flops_total_ = flops_mainloop_ + flops_epilogue_; //complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: flops_total_ *=4; break; default: break; } return flops_total_; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status Conv2dOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(operation->description()); if (!arg_as_int(problem_.n, "n", problem_space, problem)) { // default value problem_.n = 1; } if (!arg_as_int(problem_.h, "h", problem_space, problem)) { // default value problem_.h = 16; } if (!arg_as_int(problem_.w, "w", problem_space, problem)) { // default value problem_.w = 16; } if (!arg_as_int(problem_.c, "c", problem_space, problem)) { // default value problem_.c = 64; } if (!arg_as_int(problem_.k, "k", problem_space, problem)) { // default value problem_.k = 64; } if (!arg_as_int(problem_.r, "r", problem_space, problem)) { // default value problem_.r = 3; } if (!arg_as_int(problem_.s, "s", problem_space, problem)) { // default value problem_.s = 3; } if (!arg_as_int(problem_.groups, "g", problem_space, problem)) { // default value problem_.groups = 1; } if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) { // default value problem_.pad_h = 1; } if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) { // default value problem_.pad_w = 1; } if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) { // default value problem_.stride_h = 1; } if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) { // default value problem_.stride_w = 1; } if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) { // default value problem_.dilation_h = 1; } if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) { // default value problem_.dilation_w = 1; } //////////////////////// Convolution output dimensions p and q //////////////////////// // Cutlass convolutions support arbitrary output sizes and not constrained by // // input, filter, padding, striding, dilation sizes. // // cuDNN sets the output dimensions (p, q) using following equations: // // // // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // // where; div_up(a, b) : (a - 1)/b + 1 // // // // Thus, when output p and q dimensions are unspecified by the user // // cutlass profiler sets p and q which are cuDNN compliant. 
// // // //////////////////////////////////////////////////////////////////////////////////////// // set convolution output p if (!arg_as_int(problem_.p, "p", problem_space, problem)) { // default value (set using cudnn formula for output height, when p is not provided) problem_.p = ( problem_.h + 2 * problem_.pad_h - ((problem_.r - 1) * problem_.dilation_h + 1) ) / (problem_.stride_h) + 1; } // set convolution output q if (!arg_as_int(problem_.q, "q", problem_space, problem)) { // default value (set using cudnn formula for output width, when q is not provided) problem_.q = ( problem_.w + 2 * problem_.pad_w - ((problem_.s - 1) * problem_.dilation_w + 1) ) / (problem_.stride_w) + 1; } ///////////////////////////////////////////////////////////////////////////////////////// if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) { // default value problem_.split_k_mode = library::SplitKMode::kSerial; } if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) { // default value problem_.split_k_slices = 1; } if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) { // default value problem_.conv_mode = library::ConvModeID::kCrossCorrelation; } if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) { // default value problem_.eq_gemm_provider = library::Provider::kNone; } if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( problem_.alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( problem_.beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } // initialize library::Conv2dConfiguration conv_workspace_.configuration.problem_size = conv::Conv2dProblemSize( int(problem_.n), int(problem_.h), int(problem_.w), int(problem_.c), int(problem_.k), int(problem_.r), int(problem_.s), int(problem_.p), int(problem_.q), int(problem_.pad_h), int(problem_.pad_w), int(problem_.stride_h), int(problem_.stride_w), int(problem_.dilation_h), int(problem_.dilation_w), static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)), int(problem_.split_k_slices), int(problem_.groups) ); conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode)); conv_workspace_.set_stride_vector( problem_, operation_desc.conv_kind, operation_desc.A.layout, operation_desc.B.layout, operation_desc.C.layout); // initialize library::ConvArguments conv_workspace_.arguments.A = nullptr; conv_workspace_.arguments.B = nullptr; conv_workspace_.arguments.C = nullptr; conv_workspace_.arguments.D = nullptr; 
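// Worked example of the output-size formula in the boxed comment above, using the sizes shown in
// print_examples() (h = w = 14, r = s = 3, pad = 1, stride = 1, dilation = 1):
//   p = (14 + 2*1 - ((3 - 1)*1 + 1)) / 1 + 1 = 14
//   q = (14 + 2*1 - ((3 - 1)*1 + 1)) / 1 + 1 = 14
// i.e. a 3x3 filter with unit padding, stride and dilation preserves the H x W extent.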
conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // initialize reduction operation for parallel splitKMode if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) { return Status::kErrorInternal; } } initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments); } /// Initializes the performance result void Conv2dOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::ConvDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; result.arguments.resize(problem_space.rank()); set_argument(result, "Activation", problem_space, std::string(library::to_string(operation_desc.activation().element)) + ":" + library::to_string(operation_desc.activation().layout)); set_argument(result, "Filter", problem_space, std::string(library::to_string(operation_desc.filter().element)) + ":" + library::to_string(operation_desc.filter().layout)); set_argument(result, "Output", problem_space, std::string(library::to_string(operation_desc.output().element)) + ":" + library::to_string(operation_desc.output().layout)); set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind)); set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm))); set_argument(result, "n", problem_space, problem_.n); set_argument(result, "h", problem_space, problem_.h); set_argument(result, "w", problem_space, problem_.w); set_argument(result, "c", problem_space, problem_.c); set_argument(result, "k", problem_space, problem_.k); set_argument(result, "r", problem_space, problem_.r); set_argument(result, "s", problem_space, problem_.s); set_argument(result, "p", problem_space, problem_.p); set_argument(result, "q", problem_space, problem_.q); set_argument(result, "g", problem_space, problem_.groups); set_argument(result, "pad_h", problem_space, problem_.pad_h); set_argument(result, "pad_w", problem_space, problem_.pad_w); set_argument(result, "stride_h", problem_space, problem_.stride_h); set_argument(result, "stride_w", problem_space, problem_.stride_w); set_argument(result, "dilation_h", problem_space, problem_.dilation_h); set_argument(result, "dilation_w", problem_space, problem_.dilation_w); set_argument(result, "split_k_mode", problem_space, std::string(library::to_string(problem_.split_k_mode))); set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices); set_argument(result, "conv_mode", problem_space, std::string(library::to_string(problem_.conv_mode))); set_argument(result, "alpha", problem_space, library::lexical_cast(problem_.alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(problem_.beta, operation_desc.element_epilogue)); set_argument(result, "eq_gemm_provider", problem_space, std::string(library::to_string(problem_.eq_gemm_provider))); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Bytes of activation, filter, and output 
tensors int64_t activation_bytes = int64_t(library::sizeof_bits(operation_desc.activation().element) / 8) * conv_workspace_.configuration.problem_size.activation_size(); int64_t filter_bytes = int64_t(library::sizeof_bits(operation_desc.filter().element) / 8) * conv_workspace_.configuration.problem_size.filter_size(); int64_t output_bytes = int64_t(library::sizeof_bits(operation_desc.output().element) / 8) * conv_workspace_.configuration.problem_size.output_size(); // Bytes of activation, filter, and output tensors result.bytes = problem_.bytes(operation_desc); // Theoretical flops required for the computation result.flops = problem_.flops(operation_desc); // Measured runtime result.runtime = 0; } /// Initialize reduction problem dimensions and library::Operation bool Conv2dOperationProfiler::initialize_reduction_configuration_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); library::ConvKind const &conv_kind = conv_desc.conv_kind; if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) { return false; } if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) { return false; } /// This chooses the appropriate stride element of the row-major C tensor. int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 2 : 0); /// initialize library::ReductionConfiguration conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn(); conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices); conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product(); conv_workspace_.reduction_configuration.ldw = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.lds = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.ldd = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; // find reduction operation library::ReductionFunctionalKey reduction_key( library::Provider::kCUTLASS, conv_desc.tile_description.math_instruction.element_accumulator, // element workspace conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator conv_desc.C.element, // element output conv_desc.element_epilogue // element compute ); #if 0// debug print to check which reduction instance is selected std::cout << reduction_key << "\n"; #endif auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key); if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) { return false; } // initialize reduction operation required for parallel split-k conv2d operator reduction_op_ = reduction_it->second; // reduction operation found and initialized return true; } /// Initializes workspace Status Conv2dOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if 
(!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(underlying_operation->description()); // Compute the number of copies of the problem to avoid L2 camping. if (!options.profiling.workspace_count) { int64_t bytes = problem_.bytes(operation_desc); if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) { conv_workspace_.problem_count = 1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes); } else { conv_workspace_.problem_count = 1; } } else { conv_workspace_.problem_count = options.profiling.workspace_count; } if (options.execution_mode != ExecutionMode::kDryRun) { int seed_shift = 0; conv_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, problem_.extent_a(operation_desc.conv_kind), conv_workspace_.configuration.stride_a, conv_workspace_.problem_count, seed_shift++ ); conv_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, problem_.extent_b(operation_desc.conv_kind), conv_workspace_.configuration.stride_b, conv_workspace_.problem_count, seed_shift++ ); if(problem_.groups == problem_.c && problem_.groups == problem_.k){ // Depthwise direct conv kernel needs reorder the filter. conv_workspace_.reordered_B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, problem_.extent_b(operation_desc.conv_kind), conv_workspace_.configuration.stride_b, conv_workspace_.problem_count, seed_shift++ ); } conv_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count, seed_shift++ ); conv_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); conv_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration); conv_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration); conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = underlying_operation->initialize( &conv_workspace_.configuration, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (status != Status::kSuccess) { return status; } if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration); conv_workspace_.reduction_host_workspace.resize(workspace_size, 0); status = reduction_op_->initialize( &conv_workspace_.reduction_configuration, conv_workspace_.reduction_host_workspace.data(), nullptr); if (status 
!= Status::kSuccess) { return status; } } } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kConv2d; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool Conv2dOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } cudaError_t result; // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Computed->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; if (conv_workspace_.reordered_B != nullptr){ conv_workspace_.arguments.reordered_B = conv_workspace_.reordered_B->data(); }else{ conv_workspace_.arguments.reordered_B = nullptr; } conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data()); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } // // Run the CUTLASS operation // // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { results_.back().disposition = Disposition::kFailed; return false; } } #if 0 std::cout << "profiling : " << std::endl << "conv2d : " << operation->description().name << std::endl << "underlying conv2d : " << underlying_operation->description().name << std::endl << "reduction : " << reduction_op_->description().name << std::endl; #endif // run cutlass conv2d operation results_.back().status = underlying_operation->run( &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } 
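// Sketch of the parallel split-K data flow, inferred from the argument wiring above (a reading
// aid, not a normative description of the kernels): with split_k_mode == kParallel the conv
// operation runs with alpha = 1, beta = 0 and writes its partial products into device_workspace,
// and the reduction operator below then computes
//   D = alpha * sum_over_partitions(workspace) + beta * C
// with the user-supplied scalars, which is why problem_.alpha_one/problem_.beta_zero are passed
// to the conv arguments and problem_.alpha/problem_.beta to the reduction arguments.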
// Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { results_.back().status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } } // Synchronize before running device reference result = cudaDeviceSynchronize(); if (result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran but has not yet been verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUDNN // Run verification cudnn reference if (options.verification.provider_enabled(library::Provider::kCUDNN)) { // Guard against unsupported cases auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description()); Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration); // Initialize reference data to the source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); if (status == Status::kSuccess) { // call cudnn verification if supported verify_with_cudnn_( options, report, device_context, operation, problem_space, problem); } else if (status == Status::kErrorInvalidProblem) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem; } else { // set verification map for cudnn to not supported results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUDNN // Run verification device reference if (options.verification.provider_enabled(library::Provider::kReferenceDevice)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_device_reference_( options, report, device_context, operation, problem_space, problem); } // Run verification host reference if (options.verification.provider_enabled(library::Provider::kReferenceHost)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_host_reference_( options, report, device_context, operation, problem_space, problem); } // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv2dOperationProfiler::verify_with_host_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find host reference operation using conv2d functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription
const &>(desc); library::ConvFunctionalKey conv2d_key( library::Provider::kReferenceHost, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); #if 0 // debug print to check which host reference instance is selected std::cout << conv2d_key << "\n"; #endif auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // conv2d host reference minimum cc is 0 (CPU) and no iterator algorithm library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // host reference has only one instances in Conv2dOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Copy input tensors A, B, and C from device to host buffers // conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes()); conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes()); conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes()); conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data()); conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data()); conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data()); // // Initialize structure containing Conv2d arguments // conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data(); conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data(); conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Initialize host reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // // Run host reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified; return true; } // // Copy host reference output to device memory for equality check on device // conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D); // // Verify results // results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, 
library::Provider::kReferenceHost); } // Return true means continue profiling return true; } /// Verifies CUTLASS against device reference bool Conv2dOperationProfiler::verify_with_device_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find device reference operation using conv2d functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription const &>(desc); library::ConvFunctionalKey conv2d_key( library::Provider::kReferenceDevice, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; return true; } // conv2d device reference minimum cc is 50 and no iterator algorithm library::ConvPreferenceKey preference_key(50, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; return true; } // device reference has only one instance in Conv2dOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Initialize device reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run device reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotVerified; return true; } // // Verify results // results_.back().verification_map[library::Provider::kReferenceDevice] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kReferenceDevice] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, library::Provider::kReferenceDevice); } // Return true means continue profiling return true; } /// Measures performance results bool
Conv2dOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Computed->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data() ); } return true; } /// Method to profile a CUTLASS Operation Status Conv2dOperationProfiler::profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace) { GpuTimer timer; // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; library::ConvArguments *conv_arguments = static_cast<library::ConvArguments *>(arguments); if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } // // Optional sleep to limit power consumption and thermals // sleep(options.profiling.sleep_duration); // // Warmup loop // Status status; for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { // Setup rotating workspace int workspace_idx = options.profiling.warmup_iterations + iteration; int problem_idx = (workspace_idx % conv_workspace_.problem_count); conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_arguments->D = conv_workspace_.device_workspace.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); 
conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); } // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Initialize GPU timer // timer.start(); // // Profiling loop // int Iterations = options.profiling.iterations; int iteration = 0; for (; iteration < Iterations; ++iteration) { // Setup rotating workspace int problem_idx = (iteration % conv_workspace_.problem_count); conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_arguments->D = conv_workspace_.device_workspace.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); } // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Wait for completion // timer.stop_and_wait(); // // Update performance result // runtime = timer.duration(iteration); return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if CUTLASS_ENABLE_CUDNN /// Verifies CUTLASS against cudnn reference bool Conv2dOperationProfiler::verify_with_cudnn_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); // // Construct cudnn operators // CudnnCreate handle; cudnnStatus_t status = handle.get_cudnn_create_status(); if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Initialize state // // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // cuDNN does not support four tensor arguments, so we copy the tensor C 
data into // tensor D. conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); conv_workspace_.arguments.C = conv_workspace_.arguments.D; try { // // Construct dispatcher to cudnn operator // detail::cudnnConvDispatcher conv_op( conv_desc, conv_workspace_.configuration, conv_workspace_.arguments, handle ); if (conv_op.status != Status::kSuccess) { if (conv_op.status == Status::kErrorNotSupported) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } else { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } return true; } status = conv_op(handle); // Handle errors if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) { save_workspace( device_context, options, conv_desc, library::Provider::kCUTLASS, library::Provider::kCUDNN); } } catch (...) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } // Return true means continue profiling return true; } #endif // #if CUTLASS_ENABLE_CUDNN ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
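/*
  Illustrative invocation of this profiler (assembled from the argument names registered in
  initialize_result_ above; the operation name, flag spellings, and values are assumptions for
  illustration rather than a recorded command):

    $ cutlass_profiler --operation=Conv2d --conv_kind=fprop \
        --n=32 --h=14 --w=14 --c=256 --k=512 --r=3 --s=3 \
        --pad_h=1 --pad_w=1 --stride_h=1 --stride_w=1 \
        --split_k_mode=serial --split_k_slices=1
*/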
cutlass/tools/profiler/src/conv2d_operation_profiler.cu/0
{ "file_path": "cutlass/tools/profiler/src/conv2d_operation_profiler.cu", "repo_id": "cutlass", "token_count": 20115 }
60
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "cutlass/profiler/cublas_helpers.h" #include "cutlass/profiler/rank_2k_operation_profiler.h" #include "cutlass/profiler/gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor Rank2KOperationProfiler::Rank2KOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kRank2K, { {ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"}, {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"}, {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"}, }, { library::Provider::kCUBLAS} ) { description_ = " 
Rank 2k Update. D = alpha * (A*B^T + B*A^T) + beta * C (symmetric) or D = alpha * (A*B^H+B*A^H) + beta * C (hermitian)"; } /// Destructor Rank2KOperationProfiler::~Rank2KOperationProfiler() { } /// Prints usage statement for the math function void Rank2KOperationProfiler::print_usage(std::ostream &out) const { out << "RankK" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void Rank2KOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular problem size Syrk kernel:\n" << " $ cutlass_profiler --operation=rank_2k --blas_mode=symmetric --n=1024 --k=128\n\n" << "Profile a particular problem size Herk kernel:\n" << " $ cutlass_profiler --operation=rank_2k --blas_mode=hermitian --n=1024 --k=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=rank_2k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=rank_2k --accumulator-type=f16,f32\n\n" << "Schmoo over fill modes:\n" << " $ cutlass_profiler --operation=rank_2k --fill_mode=lower/upper\n\n" << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major, use row or t):\n" << " $ cutlass_profiler --operation=rank_2k --A=f16:column or --A=*:row\n\n" << "Using various input value distributions:\n" << " $ cutlass_profiler --operation=rank_2k --dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=rank_2k --dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=rank_2k --dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" << " $ cutlass_profiler --operation=rank_2k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to rank_2k kernels with a quick functional test and save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=rank_2k \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif Status Rank2KOperationProfiler::RankKProblem::parse( library::RankKDescription const &operation_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->k, "k", problem_space, problem)) { // default value this->k = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { // default value this->batch_count = 1; } if (this->split_k_slices > 1 && this->batch_count > 1) { // At least one of these must be one return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return
Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->n), int(this->k)}).front(); this->ldb = DeviceAllocation::get_packed_layout( operation_desc.B.layout, {int(this->n), int(this->k)}).front(); this->ldc = DeviceAllocation::get_packed_layout( operation_desc.C.layout, {int(this->n), int(this->n)}).front(); return Status::kSuccess; } /// Total number of bytes loaded int64_t Rank2KOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const { // Input bytes read and Output bytes written for the gemm problem int64_t bytes = 2 * int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k + 2 * int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k + // Half matrix including the diagonal will have (N*(N+1))/2 elements int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; } bytes *= batch_count; return bytes; } /// Total number of flops computed int64_t Rank2KOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const { // FLOPs = 2 * n(n+1)k/2 [mma1] + 2 * n(n+1)k/2 [mma2] + 2 * n(n+1)/2 [epilogue] // FLOPs = n(n+1)(2k + 1) int64_t flops_ = n * (n + 1) * (2*k + 1); // complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: flops_ *= 4; break; case library::MathOperationID::kMultiplyAddComplexFastF32: flops_ *= 4; break; case library::MathOperationID::kMultiplyAddGaussianComplex: flops_ *= 3; break; default: break; } return flops_; } /// Initializes a performance result void Rank2KOperationProfiler::RankKProblem::initialize_result( PerformanceResult &result, library::RankKDescription const &operation_desc, ProblemSpace const &problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "B", problem_space, std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); set_argument(result, "C", problem_space, std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); set_argument(result, "blas_mode", problem_space, 
library::to_string(operation_desc.blas_mode)); set_argument(result, "n", problem_space, n); set_argument(result, "k", problem_space, k); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status Rank2KOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::RankKDescription const &operation_desc = static_cast<library::RankKDescription const &>(operation->description()); if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } rank_k_workspace_.configuration.problem_size.m() = int(problem_.n); rank_k_workspace_.configuration.problem_size.n() = int(problem_.n); rank_k_workspace_.configuration.problem_size.k() = int(problem_.k); rank_k_workspace_.configuration.lda = problem_.lda; rank_k_workspace_.configuration.ldb = problem_.ldb; rank_k_workspace_.configuration.ldc = problem_.ldc; rank_k_workspace_.configuration.ldd = problem_.ldc; //rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices); rank_k_workspace_.arguments.A = nullptr; rank_k_workspace_.arguments.B = nullptr; rank_k_workspace_.arguments.C = nullptr; rank_k_workspace_.arguments.D = nullptr; rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments); } /// Initializes the performance result void Rank2KOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::RankKDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); result.bytes = problem_.bytes(operation_desc); result.flops = problem_.flops(operation_desc); result.runtime = 0; } /// Initializes workspace Status Rank2KOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::RankKDescription const &operation_desc = static_cast<library::RankKDescription const &>(operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { int seed_shift = 0; rank_k_workspace_.A = device_context.allocate_tensor( options, "A", 
operation_desc.A.element, operation_desc.A.layout, {int(problem_.n), int(problem_.k)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); rank_k_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, {int(problem_.n), int(problem_.k)}, {int(problem_.ldb)}, 1, // batch_count seed_shift++ ); rank_k_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)}, 1, // batch_count seed_shift++ ); rank_k_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)} ); rank_k_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)} ); rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data()); rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data()); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration); rank_k_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration); rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &rank_k_workspace_.configuration, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kRank2K; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool Rank2KOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.B = rank_k_workspace_.B->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &rank_k_workspace_.arguments, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } cudaError_t result = cudaDeviceSynchronize(); if 
(result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran but has not yet been verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUBLAS if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { // Guard against unsupported cases auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description()); if (cublas_satisfies(rank_k_desc) == Status::kSuccess) { // call cublas verification if supported verify_with_cublas_( options, report, device_context, operation, problem_space, problem); } else { // set verification map for cublas to not supported results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUBLAS // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool Rank2KOperationProfiler::verify_with_cublas_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { #if CUTLASS_ENABLE_CUBLAS library::RankKDescription const &rank_k_desc = static_cast<library::RankKDescription const &>(operation->description()); // // Construct cuBLAS operators // CublasCreate handle; cublasStatus_t status = handle.get_cublas_create_status(); if (status != CUBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Initialize state // try { // // Construct dispatcher to cublas<t>Syr2k() // // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.B = rank_k_workspace_.B->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; detail::cublasRankKDispatcher rank_k_op( rank_k_desc, rank_k_workspace_.configuration, rank_k_workspace_.arguments ); if (rank_k_op.status != Status::kSuccess) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; return true; } results_.back().status = Status::kSuccess; status = rank_k_op(handle); // Handle errors if (status != CUBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( options, *rank_k_workspace_.Computed, *rank_k_workspace_.Reference
); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { save_workspace( device_context, options, rank_k_desc, library::Provider::kCUTLASS, library::Provider::kCUBLAS); } } catch (...) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; } #endif // Return true means continue profiling return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool Rank2KOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.B = rank_k_workspace_.B->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &rank_k_workspace_.arguments, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data() ); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
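/*
  Worked example for the RankKProblem::bytes()/flops() models above (illustrative numbers only,
  not measured output):

    n = 1024, k = 128, real-valued math, beta != 0, batch_count = 1
      flops = n * (n + 1) * (2*k + 1) = 1024 * 1025 * 257 ~= 2.70e8
      bytes = 2 * sizeof(A element) * n * k            // A traffic
            + 2 * sizeof(B element) * n * k            // B traffic
            + 2 * sizeof(C element) * n * (n + 1) / 2  // C read (beta != 0) plus D written

  For complex-valued math the flop count is scaled by 4 (3 for Gaussian complex), matching the
  switch on math_operation in flops().
*/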
cutlass/tools/profiler/src/rank_2k_operation_profiler.cu/0
{ "file_path": "cutlass/tools/profiler/src/rank_2k_operation_profiler.cu", "repo_id": "cutlass", "token_count": 8854 }
61
/****************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #pragma once /** * \file * \brief cuda kernels to do avg/max pooling on a device memory tensor with NHWC layout. */ #include "cutlass/cutlass.h" #include "cutlass/layout/tensor.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_coord.h" #include "cutlass/tensor_ref.h" #include "device_utils.h" #include <float.h> namespace cutlass { /** \brief interface to do avg/max pooling on a device memory tensor with NHWC layout. 
* \tparam T: data type */ template <typename T> void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size, cutlass::Tensor4DCoord filter_tensor_size, cutlass::Tensor4DCoord output_tensor_size, cutlass::Tensor4DCoord padding, cutlass::MatrixCoord stride, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_output, int poolingType, //0 for avg pooling ; 1 for max pooling cudaStream_t stream); /** get the output size of pooling */ inline int getOutputSize(int H_W, int padding, int kernel_size, int stride) { return (H_W + 2 * padding - kernel_size) / stride + 1; } /** * input is [N, H, W, C] * assume stride == kernel_size * output_h = (H + 2*padding_H - kernel_H)/stride_H * output_w = (W + 2*padding_W - kernel_W)/stride_W * output is [N, output_h, output_w, C] * grid(N, output_h, output_w) * block(min(C, 256)) : * each block deals with C elements of the output; each thread deals with ((C + 255)/256) elements of the output */ template<typename T, bool IS_AVG_POOLING> __global__ void pooling_nhwc_element1_kernel(T* output, const T* input, const int N, const int H, const int W, const int C, const int output_H, const int output_W, const int kernel_H, const int kernel_W, const int stride_H, const int stride_W, const int padding_H, const int padding_W) { const int tid = threadIdx.x; const int n_idx = blockIdx.x; const int output_h_idx = blockIdx.y; const int output_w_idx = blockIdx.z; int h_start_idx = output_h_idx * stride_H - padding_H; int h_end_idx = h_start_idx + kernel_H; h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx; h_end_idx = h_end_idx > H ? H : h_end_idx; int w_start_idx = output_w_idx * stride_W - padding_W; int w_end_idx = w_start_idx + kernel_W; w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx; w_end_idx = w_end_idx > W ? W : w_end_idx; input += n_idx * H * W * C; output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C; const int kernel_size2 = kernel_H * kernel_W; for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) { float pooling; if (IS_AVG_POOLING){ pooling = 0.0f; } else{ pooling = -FLT_MAX; } for (int h = h_start_idx; h < h_end_idx; h++) { for (int w = w_start_idx; w < w_end_idx; w++) { const int idx = (h * W + w) * C; const float tmp = static_cast<float>(input[idx + c_idx]); if (IS_AVG_POOLING){ pooling = pooling + tmp; } else{ pooling = pooling > tmp ? pooling : tmp; } } } T output_val; if (IS_AVG_POOLING){ output_val = T(pooling/kernel_size2); } else{ output_val = T(pooling); } output[c_idx] = output_val; } } template<typename T2, typename T, bool IS_AVG_POOLING> __global__ void pooling_nhwc_element2_kernel(T2* output, const T2* input, const int N, const int H, const int W, const int C, const int output_H, const int output_W, const int kernel_H, const int kernel_W, const int stride_H, const int stride_W, const int padding_H, const int padding_W) { const int tid = threadIdx.x; const int n_idx = blockIdx.x; const int output_h_idx = blockIdx.y; const int output_w_idx = blockIdx.z; int h_start_idx = output_h_idx * stride_H - padding_H; int h_end_idx = h_start_idx + kernel_H; h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx; h_end_idx = h_end_idx > H ? H : h_end_idx; int w_start_idx = output_w_idx * stride_W - padding_W; int w_end_idx = w_start_idx + kernel_W; w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx; w_end_idx = w_end_idx > W ?
W : w_end_idx; input += n_idx * H * W * C; output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C; const int kernel_size2 = kernel_H * kernel_W; for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) { float2 pooling; if (IS_AVG_POOLING) { pooling = {0.0f, 0.0f}; } else { pooling = {-FLT_MAX, -FLT_MAX}; } for (int h = h_start_idx; h < h_end_idx; h++) { for (int w = w_start_idx; w < w_end_idx; w++) { const int idx = (h * W + w) * C; const T2 tmp = input[idx + c_idx]; const float2 tmp_flt2 = {static_cast<float>(tmp.x), static_cast<float>(tmp.y)}; if (IS_AVG_POOLING) { pooling.x += tmp_flt2.x; pooling.y += tmp_flt2.y; } else { pooling.x = pooling.x > tmp_flt2.x ? pooling.x : tmp_flt2.x; pooling.y = pooling.y > tmp_flt2.y ? pooling.y : tmp_flt2.y; } } } T2 output_val; if (IS_AVG_POOLING) { output_val.x = T(pooling.x/kernel_size2); output_val.y = T(pooling.y/kernel_size2); } else { output_val.x = T(pooling.x); output_val.y = T(pooling.y); } output[c_idx] = output_val; } } /** * output [N, 1, 1, C] * input [N, H, W, C] * grid(C, N) * block(block_size) -- each block deals with H*W/block_size elements; */ template<typename T, bool IS_AVG_POOLING> __global__ void pooling_nxhTo1x1_element1_kernel( T* output, const T* input, const int N, const int HW, const int C) { const int c_idx = blockIdx.x; const int n_idx = blockIdx.y; float pooling[1]; if (IS_AVG_POOLING) { pooling[0] = 0.0f; } else { pooling[0] = -FLT_MAX; } const size_t input_offset = n_idx * HW * C + c_idx; input += input_offset; const size_t output_offset = n_idx * C + c_idx; output += output_offset; int tid = threadIdx.x; for (int index = tid; index < HW; index += blockDim.x) { float val = static_cast<float>(input[index * C]); if (IS_AVG_POOLING) { pooling[0] += val; } else { pooling[0] = pooling[0] > val ? pooling[0] : val; } } if (blockDim.x <= 32) { if (IS_AVG_POOLING) { warpReduceSum<float, 1>(pooling); } else { warpReduceMax<float, 1>(pooling); } } else { if (IS_AVG_POOLING) { blockReduceSum<float, 1>(pooling); } else { blockReduceMax<float, 1>(pooling); } } __syncthreads(); if (threadIdx.x == 0) { T output_val; if (IS_AVG_POOLING) { output_val = T(pooling[0] / HW); } else { output_val = T(pooling[0]); } output[0] = output_val; } } /** * output [N, 1, 1, C] * input [N, H, W, C] * grid(C/2, N) * block(block_size) -- each thread deals with H*W/block_size * 2 elements; */ template<typename T2, typename T, bool IS_AVG_POOLING> __global__ void pooling_nxhTo1x1_element2_kernel( T2* output, const T2* input, const int N, const int HW, const int C) { const int c_idx = blockIdx.x; const int n_idx = blockIdx.y; float pooling[2]; if (IS_AVG_POOLING) { pooling[0] = pooling[1] = 0.0f; } else { pooling[0] = pooling[1] = -FLT_MAX; } const int C_2 = C / 2; const size_t input_offset = n_idx * HW * C_2 + c_idx; input += input_offset; const size_t output_offset = n_idx * C_2 + c_idx; output += output_offset; int tid = threadIdx.x; for (int index = tid; index < HW; index += blockDim.x) { T2 val = input[index * C_2]; float2 val_flt2 = {static_cast<float>(val.x), static_cast<float>(val.y)}; if (IS_AVG_POOLING) { pooling[0] += val_flt2.x; pooling[1] += val_flt2.y; } else { pooling[0] = pooling[0] > val_flt2.x ? pooling[0] : val_flt2.x; pooling[1] = pooling[1] > val_flt2.y ? 
pooling[1] : val_flt2.y; } } if (blockDim.x <= 32) { if (IS_AVG_POOLING) { warpReduceSum<float, 2>(pooling); } else { warpReduceMax<float, 2>(pooling); } } else { if (IS_AVG_POOLING) { blockReduceSum<float, 2>(pooling); } else { blockReduceMax<float, 2>(pooling); } } __syncthreads(); if (threadIdx.x == 0) { T2 output_val; if (IS_AVG_POOLING) { output_val.x = T(pooling[0] / HW); output_val.y = T(pooling[1] / HW); } else { output_val.x = T(pooling[0]); output_val.y = T(pooling[1]); } output[0] = output_val; } } template <typename T> void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size, cutlass::Tensor4DCoord filter_tensor_size, cutlass::Tensor4DCoord output_tensor_size, cutlass::Tensor4DCoord padding, cutlass::MatrixCoord stride, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_output, int poolingType, //0 for avg pooling ; 1 for max pooling cudaStream_t stream) { assert(input_tensor_size.n() == output_tensor_size.n() && input_tensor_size.c() == output_tensor_size.c()); assert(filter_tensor_size.h() == stride.row() && filter_tensor_size.w() == stride.column()); const int N = input_tensor_size.n(); const int H = input_tensor_size.h(); const int W = input_tensor_size.w(); const int C = input_tensor_size.c(); const int padding_H = padding.h(); const int padding_W = padding.w(); const int kernel_H = filter_tensor_size.h(); const int kernel_W = filter_tensor_size.w(); const int stride_H = stride.row(); const int stride_W = stride.column(); const int output_H = getOutputSize(H, padding_H, kernel_H, stride_H); const int output_W = getOutputSize(W, padding_W, kernel_W, stride_W); assert(output_tensor_size.h() == output_H && output_tensor_size.w() == output_W); if (C % 2 != 0) { if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) { dim3 grid(C, N); dim3 block(256); if (H*W < block.x){ block.x = (H*W + 31)/32*32; } if (poolingType == 0) { pooling_nxhTo1x1_element1_kernel<T, true><<<grid, block, 0, stream>>>( ref_output.data(), ref_input.data(), N, H*W, C); } // if (poolingType == 0) else { pooling_nxhTo1x1_element1_kernel<T, false><<<grid, block, 0, stream>>>( ref_output.data(), ref_input.data(), N, H*W, C); } } // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) else { dim3 grid(N, output_H, output_W); dim3 block(256); if (C < block.x) { block.x = C; } if (poolingType == 0) { pooling_nhwc_element1_kernel<T, true><<<grid, block, 0, stream>>>( ref_output.data(), ref_input.data(), N, H, W, C, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } // if (poolingType == 0) else { pooling_nhwc_element1_kernel<T, false><<<grid, block, 0, stream>>>( ref_output.data(), ref_input.data(), N, H, W, C, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } } } // if (C % 2 != 0)) else { if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) { dim3 grid(C/2, N); dim3 block(256); if (H*W < block.x){ block.x = (H*W + 31)/32*32; } if (poolingType == 0) { if (std::is_same<T, float>::value) { pooling_nxhTo1x1_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>( (float2*)(ref_output.data()), (const float2*)(ref_input.data()), N, H*W, C); } // if (std::is_same<T, float>::value) else { pooling_nxhTo1x1_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>( (half2*)(ref_output.data()), (const half2*)(ref_input.data()), N, H*W, C); } } // if (poolingType == 0) else { if (std::is_same<T, float>::value) { 
pooling_nxhTo1x1_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>( (float2*)(ref_output.data()), (const float2*)(ref_input.data()), N, H*W, C); } // if (std::is_same<T, float>::value) else { pooling_nxhTo1x1_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>( (half2*)(ref_output.data()), (const half2*)(ref_input.data()), N, H*W, C); } } } // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) else { dim3 grid(N, output_H, output_W); dim3 block(256); if (C/2 < block.x) { block.x = C/2; } if (poolingType == 0) { if (std::is_same<T, float>::value) { pooling_nhwc_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>( (float2*)(ref_output.data()), (const float2*)(ref_input.data()), N, H, W, C/2, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } // if (std::is_same<T, float>::value) else { pooling_nhwc_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>( (half2*)(ref_output.data()), (const half2*)(ref_input.data()), N, H, W, C/2, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } } // if (poolingType == 0) else { if (std::is_same<T, float>::value) { pooling_nhwc_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>( (float2*)(ref_output.data()), (const float2*)(ref_input.data()), N, H, W, C/2, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } // if (std::is_same<T, float>::value) else { pooling_nhwc_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>( (half2*)(ref_output.data()), (const half2*)(ref_input.data()), N, H, W, C/2, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } } } } } } //namespace cutlass
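// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the original header). It shows one way
// the pooling_nhwc() entry point above might be driven from host code. It
// assumes cutlass/util/host_tensor.h is available and that the caller honors
// the constraint asserted by the implementation: the filter extent must equal
// the stride (non-overlapping windows). All extents below are illustrative.

#include "cutlass/util/host_tensor.h"

inline void run_avg_pooling_nhwc_example(cudaStream_t stream) {
  using Element = float;
  using Layout  = cutlass::layout::TensorNHWC;

  cutlass::Tensor4DCoord input_size(2, 8, 8, 32);    // N, H, W, C
  cutlass::Tensor4DCoord filter_size(1, 2, 2, 32);   // kernel_H = 2, kernel_W = 2
  cutlass::Tensor4DCoord padding(0, 0, 0, 0);        // padding_H = .h(), padding_W = .w()
  cutlass::MatrixCoord   stride(2, 2);               // must match the filter extent

  int output_h = cutlass::getOutputSize(/*H_W=*/8, /*padding=*/0, /*kernel_size=*/2, /*stride=*/2);  // == 4
  int output_w = cutlass::getOutputSize(/*H_W=*/8, /*padding=*/0, /*kernel_size=*/2, /*stride=*/2);  // == 4
  cutlass::Tensor4DCoord output_size(2, output_h, output_w, 32);

  cutlass::HostTensor<Element, Layout> input(input_size);
  cutlass::HostTensor<Element, Layout> output(output_size);
  input.sync_device();                               // copy host data to the device allocation

  // poolingType == 0 selects average pooling, 1 selects max pooling.
  cutlass::pooling_nhwc<Element>(input_size, filter_size, output_size,
                                 padding, stride,
                                 input.device_ref(), output.device_ref(),
                                 /*poolingType=*/0, stream);

  output.sync_host();                                // copy the pooled result back to the host
}
// ---------------------------------------------------------------------------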
cutlass/tools/util/include/cutlass/util/device_nhwc_pooling.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/device_nhwc_pooling.h", "repo_id": "cutlass", "token_count": 9927 }
62
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for CONV in host-side code. 
*/ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// #include "cutlass/complex.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" #include "cute/tensor.hpp" #include <cuda_runtime.h> ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::reference::host { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template<class EngineAct, class LayoutAct> bool is_activation_in_bounds( cute::Tensor<EngineAct, LayoutAct> const& activation, int32_t n_, int32_t d_, int32_t h_, int32_t w_, int32_t c_) { return ((n_ >= 0 && n_ < size<4>(activation)) && (d_ >= 0 && d_ < size<3>(activation)) && (h_ >= 0 && h_ < size<2>(activation)) && (w_ >= 0 && w_ < size<1>(activation)) && (c_ >= 0 && c_ < size<0>(activation))); } template<class EngineAct, class LayoutAct> bool is_activation_in_bounds( cute::Tensor<EngineAct, LayoutAct> const& activation, int32_t n_, int32_t h_, int32_t w_, int32_t c_) { return ((n_ >= 0 && n_ < size<3>(activation)) && (h_ >= 0 && h_ < size<2>(activation)) && (w_ >= 0 && w_ < size<1>(activation)) && (c_ >= 0 && c_ < size<0>(activation))); } template<class EngineAct, class LayoutAct> bool is_activation_in_bounds( cute::Tensor<EngineAct, LayoutAct> const& activation, int32_t n_, int32_t w_, int32_t c_) { return ((n_ >= 0 && n_ < size<2>(activation)) && (w_ >= 0 && w_ < size<1>(activation)) && (c_ >= 0 && c_ < size<0>(activation))); } } // namespace detail template< class ElementAcc_, class ElementScalar_, class ElementCompute_, class ElementC_, class ElementOut_, class TensorAlpha_, class TensorBeta_, class TensorBias_, class ActivationFunctor_ = cutlass::epilogue::thread::Identity<ElementCompute_> > struct ConvEpilogueFusionParams { using ElementAcc = ElementAcc_; using ElementScalar = ElementScalar_; using ElementCompute = ElementCompute_; using ElementC = ElementC_; using ElementOut = ElementOut_; using TensorAlpha = TensorAlpha_; using TensorBeta = TensorBeta_; using TensorBias = TensorBias_; using ActivationFunctor = ActivationFunctor_; ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); TensorAlpha tensor_alpha{}; TensorBeta tensor_beta{}; TensorBias tensor_bias{}; }; template< cutlass::conv::Operator ConvOp, int NumSpatialDims, class TensorA, class TensorB, class TensorC, class TensorD, class ShapePadding, class StrideTraversal, class ShapeDilation, class EpilogueFusionParams> struct ConvReferenceImpl { using ElementAcc = typename EpilogueFusionParams::ElementAcc; using ElementC = typename EpilogueFusionParams::ElementC; using ElementOut = typename EpilogueFusionParams::ElementOut; using ElementScalar = typename EpilogueFusionParams::ElementScalar; using ElementCompute = typename EpilogueFusionParams::ElementCompute; using ElementBias = typename EpilogueFusionParams::TensorBias::value_type; using ActivationFunctor = typename EpilogueFusionParams::ActivationFunctor; // Input related converter NumericConverter<ElementCompute, ElementAcc> acc_converter; NumericConverter<ElementCompute, ElementC> residual_converter; NumericConverter<ElementCompute, ElementBias> bias_converter; // Scale related converter NumericConverter<ElementCompute, ElementScalar> scale_converter; // Output related converter NumericConverter<ElementOut, ElementCompute> output_converter; EpilogueFusionParams& epi_fusion_params_; TensorA const& tensor_a_; TensorB 
const& tensor_b_; TensorC const& tensor_c_; TensorD& tensor_d_; ShapePadding const& padding_; StrideTraversal const& tstride_; ShapeDilation const& dilation_; // Epilogue activation operation ActivationFunctor epi_activation; ConvReferenceImpl( TensorA const& tensor_a, TensorB const& tensor_b, TensorC const& tensor_c, TensorD& tensor_d, ShapePadding const& padding, StrideTraversal const& tstride, ShapeDilation const& dilation, EpilogueFusionParams& epi_fusion_params) : tensor_a_(tensor_a), tensor_b_(tensor_b), tensor_c_(tensor_c), tensor_d_(tensor_d), padding_(padding), tstride_(tstride), dilation_(dilation), epi_fusion_params_(epi_fusion_params) { static_assert(rank(ShapePadding{}) == rank(ShapeDilation{})); static_assert(rank(ShapePadding{}) == rank(StrideTraversal{})); } void compute_reference() { if constexpr (ConvOp == cutlass::conv::Operator::kFprop) { fprop_reference(cute::Int<NumSpatialDims>{}); } else if constexpr (ConvOp == cutlass::conv::Operator::kDgrad) { dgrad_reference(cute::Int<NumSpatialDims>{}); } else { wgrad_reference(cute::Int<NumSpatialDims>{}); } } private: // Specialization for 1D fprop kernel void fprop_reference(cute::Int<1> spatial_dims) { int32_t N = size<2>(tensor_d_); int32_t Q = size<1>(tensor_d_); int32_t K = size<0>(tensor_d_); int32_t S = size<1>(tensor_b_); int32_t C = size<0>(tensor_b_); #if defined(_OPENMP) #pragma omp parallel for collapse(2) #endif for (int32_t n = 0; n < N; ++n) { for (int32_t q = 0; q < Q; ++q) { for (int32_t k = 0; k < K; ++k) { auto accumulator = ElementAcc(0); for (int32_t s = 0; s < S; ++s) { for (int32_t c = 0; c < C; ++c) { int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_); if (detail::is_activation_in_bounds(tensor_a_, n, w, c)) { accumulator += ElementAcc(tensor_a_(c, w, n) * tensor_b_(c, s, k)); } } } ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta; ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(k, q, n)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[k]); } output = epi_activation(output); tensor_d_(k, q, n) = output_converter(output); } } } } // Specialization for 2D fprop kernel void fprop_reference(cute::Int<2> spatial_dims) { int32_t N = size<3>(tensor_d_); int32_t P = size<2>(tensor_d_); int32_t Q = size<1>(tensor_d_); int32_t K = size<0>(tensor_d_); int32_t R = size<2>(tensor_b_); int32_t S = size<1>(tensor_b_); int32_t C = size<0>(tensor_b_); #if defined(_OPENMP) #pragma omp parallel for collapse(3) #endif for (int32_t n = 0; n < N; ++n) { for (int32_t p = 0; p < P; ++p) { for (int32_t q = 0; q < Q; ++q) { for (int32_t k = 0; k < K; ++k) { auto accumulator = ElementAcc(0); for (int32_t r = 0; r < R; ++r) { for (int32_t s = 0; s < S; ++s) { for (int32_t c = 0; c < C; ++c) { int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_); int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_); if (detail::is_activation_in_bounds(tensor_a_, n, h, w, c)) { accumulator += ElementAcc(tensor_a_(c, w, h, n) * tensor_b_(c, s, r, k)); } } } } ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? 
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta; ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(k, q, p, n)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[k]); } output = epi_activation(output); tensor_d_(k, q, p, n) = output_converter(output); } } } } } // Specialization for 3D fprop kernel void fprop_reference(cute::Int<3> spatial_dims) { int32_t N = size<4>(tensor_d_); int32_t Z = size<3>(tensor_d_); int32_t P = size<2>(tensor_d_); int32_t Q = size<1>(tensor_d_); int32_t K = size<0>(tensor_d_); int32_t T = size<3>(tensor_b_); int32_t R = size<2>(tensor_b_); int32_t S = size<1>(tensor_b_); int32_t C = size<0>(tensor_b_); #if defined(_OPENMP) #pragma omp parallel for collapse(3) #endif for (int32_t n = 0; n < N; ++n) { for (int32_t z = 0; z < Z; ++z) { for (int32_t p = 0; p < P; ++p) { for (int32_t q = 0; q < Q; ++q) { for (int32_t k = 0; k < K; ++k) { auto accumulator = ElementAcc(0); for (int32_t t = 0; t < T; ++t) { for (int32_t r = 0; r < R; ++r) { for (int32_t s = 0; s < S; ++s) { for (int32_t c = 0; c < C; ++c) { int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_); int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_); int32_t d = z * cute::get<2>(tstride_) - cute::get<2>(padding_) + t * cute::get<2>(dilation_); if (detail::is_activation_in_bounds(tensor_a_, n, d, h, w, c)) { accumulator += ElementAcc(tensor_a_(c, w, h, d, n) * tensor_b_(c, s, r, t, k)); } } } } } ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta; ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(k, q, p, z, n)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[k]); } output = epi_activation(output); tensor_d_(k, q, p, z, n) = output_converter(output); } } } } } } // Specialization for 1D dgrad kernel void dgrad_reference(cute::Int<1> spatial_dims) { int32_t N = size<2>(tensor_d_); int32_t W = size<1>(tensor_d_); int32_t C = size<0>(tensor_d_); int32_t K = size<2>(tensor_b_); int32_t S = size<1>(tensor_b_); #if defined(_OPENMP) #pragma omp parallel for collapse(2) #endif for (int32_t n = 0; n < N; ++n) { for (int32_t w = 0; w < W; ++w) { for (int32_t c = 0; c < C; ++c) { auto accumulator = ElementAcc(0); for (int32_t k = 0; k < K; ++k) { for (int32_t s = 0; s < S; ++s) { int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_); if (q % cute::get<0>(tstride_) == 0) { q /= cute::get<0>(tstride_); } else { continue; } if (detail::is_activation_in_bounds(tensor_a_, n, q, k)) { accumulator += ElementAcc(tensor_a_(k, q, n) * tensor_b_(c, s, k)); } } } ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? 
epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta; ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(c, w, n)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[c]); } output = epi_activation(output); tensor_d_(c, w, n) = output_converter(output); } } } } // Specialization for 2D dgrad kernel void dgrad_reference(cute::Int<2> spatial_dims) { int32_t N = size<3>(tensor_d_); int32_t H = size<2>(tensor_d_); int32_t W = size<1>(tensor_d_); int32_t C = size<0>(tensor_d_); int32_t K = size<3>(tensor_b_); int32_t R = size<2>(tensor_b_); int32_t S = size<1>(tensor_b_); #if defined(_OPENMP) #pragma omp parallel for collapse(3) #endif for (int32_t n = 0; n < N; ++n) { for (int32_t h = 0; h < H; ++h) { for (int32_t w = 0; w < W; ++w) { for (int32_t c = 0; c < C; ++c) { auto accumulator = ElementAcc(0); for (int32_t k = 0; k < K; ++k) { for (int32_t r = 0; r < R; ++r) { for (int32_t s = 0; s < S; ++s) { int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_); int32_t p = h + cute::get<1>(padding_) - r * cute::get<1>(dilation_); if (q % cute::get<0>(tstride_) == 0) { q /= cute::get<0>(tstride_); } else { continue; } if (p % cute::get<1>(tstride_) == 0) { p /= cute::get<1>(tstride_); } else { continue; } if (detail::is_activation_in_bounds(tensor_a_, n, p, q, k)) { accumulator += ElementAcc(tensor_a_(k, q, p, n) * tensor_b_(c, s, r, k)); } } } } ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta; ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(c, w, h, n)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[c]); } output = epi_activation(output); tensor_d_(c, w, h, n) = output_converter(output); } } } } } // Specialization for 3D dgrad kernel void dgrad_reference(cute::Int<3> spatial_dims) { int32_t N = size<4>(tensor_d_); int32_t D = size<3>(tensor_d_); int32_t H = size<2>(tensor_d_); int32_t W = size<1>(tensor_d_); int32_t C = size<0>(tensor_d_); int32_t K = size<4>(tensor_b_); int32_t T = size<3>(tensor_b_); int32_t R = size<2>(tensor_b_); int32_t S = size<1>(tensor_b_); #if defined(_OPENMP) #pragma omp parallel for collapse(3) #endif for (int32_t n = 0; n < N; ++n) { for (int32_t d = 0; d < D; ++d) { for (int32_t h = 0; h < H; ++h) { for (int32_t w = 0; w < W; ++w) { for (int32_t c = 0; c < C; ++c) { auto accumulator = ElementAcc(0); for (int32_t k = 0; k < K; ++k) { for (int32_t t = 0; t < T; ++t) { for (int32_t r = 0; r < R; ++r) { for (int32_t s = 0; s < S; ++s) { int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_); int32_t p = h + cute::get<1>(padding_) - r * cute::get<1>(dilation_); int32_t z = d + cute::get<2>(padding_) - t * cute::get<2>(dilation_); if (q % cute::get<0>(tstride_) == 0) { q /= cute::get<0>(tstride_); } else { continue; } if (p % cute::get<1>(tstride_) == 0) { p /= cute::get<1>(tstride_); } else { continue; } if (z % cute::get<2>(tstride_) == 0) { z /= cute::get<2>(tstride_); } else { continue; } if (detail::is_activation_in_bounds(tensor_a_, n, z, p, q, k)) { accumulator += ElementAcc(tensor_a_(k, q, p, z, n) * 
tensor_b_(c, s, r, t, k)); } } } } } ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta; ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(c, w, h, d, n)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[c]); } output = epi_activation(output); tensor_d_(c, w, h, d, n) = output_converter(output); } } } } } } // Specialization for 1D wgrad kernel void wgrad_reference(cute::Int<1> spatial_dims) { int32_t N = size<2>(tensor_a_); int32_t Q = size<1>(tensor_a_); int32_t K = size<0>(tensor_a_); int32_t S = size<1>(tensor_d_); int32_t C = size<0>(tensor_d_); #if defined(_OPENMP) #pragma omp parallel for collapse(2) #endif for (int32_t k = 0; k < K; ++k) { ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta; for (int32_t s = 0; s < S; ++s) { for (int32_t c = 0; c < C; ++c) { auto accumulator = ElementAcc(0); for (int32_t n = 0; n < N; ++n) { for (int32_t q = 0; q < Q; ++q) { int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_); if (detail::is_activation_in_bounds(tensor_b_, n, w, c)) { accumulator += ElementAcc(tensor_b_(c, w, n) * tensor_a_(k, q, n)); } } } ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(c, s, k)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[k]); } output = epi_activation(output); tensor_d_(c, s, k) = output_converter(output); } } } } // Specialization for 2D wgrad kernel void wgrad_reference(cute::Int<2> spatial_dims) { int32_t N = size<3>(tensor_a_); int32_t P = size<2>(tensor_a_); int32_t Q = size<1>(tensor_a_); int32_t K = size<0>(tensor_a_); int32_t R = size<2>(tensor_d_); int32_t S = size<1>(tensor_d_); int32_t C = size<0>(tensor_d_); #if defined(_OPENMP) #pragma omp parallel for collapse(3) #endif for (int32_t k = 0; k < K; ++k) { ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? 
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta; for (int32_t r = 0; r < R; ++r) { for (int32_t s = 0; s < S; ++s) { for (int32_t c = 0; c < C; ++c) { auto accumulator = ElementAcc(0); for (int32_t n = 0; n < N; ++n) { for (int32_t p = 0; p < P; ++p) { for (int32_t q = 0; q < Q; ++q) { int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_); int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_); if (detail::is_activation_in_bounds(tensor_b_, n, h, w, c)) { accumulator += ElementAcc(tensor_b_(c, w, h, n) * tensor_a_(k, q, p, n)); } } } } ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(c, s, r, k)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[k]); } output = epi_activation(output); tensor_d_(c, s, r, k) = output_converter(output); } } } } } // Specialization for 3D wgrad kernel void wgrad_reference(cute::Int<3> spatial_dims) { int32_t N = size<4>(tensor_a_); int32_t Z = size<3>(tensor_a_); int32_t P = size<2>(tensor_a_); int32_t Q = size<1>(tensor_a_); int32_t K = size<0>(tensor_a_); int32_t T = size<3>(tensor_d_); int32_t R = size<2>(tensor_d_); int32_t S = size<1>(tensor_d_); int32_t C = size<0>(tensor_d_); #if defined(_OPENMP) #pragma omp parallel for collapse(3) #endif for (int32_t k = 0; k < K; ++k) { ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ? epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha; ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ? epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta; for (int32_t t = 0; t < T; ++t) { for (int32_t r = 0; r < R; ++r) { for (int32_t s = 0; s < S; ++s) { for (int32_t c = 0; c < C; ++c) { auto accumulator = ElementAcc(0); for (int32_t n = 0; n < N; ++n) { for (int32_t z = 0; z < Z; ++z) { for (int32_t p = 0; p < P; ++p) { for (int32_t q = 0; q < Q; ++q) { int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_); int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_); int32_t d = z * cute::get<2>(tstride_) - cute::get<2>(padding_) + t * cute::get<2>(dilation_); if (detail::is_activation_in_bounds(tensor_b_, n, d, h, w, c)) { accumulator += ElementAcc(tensor_b_(c, w, h, d, n) * tensor_a_(k, q, p, z, n)); } } } } } ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) + scale_converter(beta) * residual_converter(tensor_c_(c, s, r, t, k)); if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) { output += bias_converter(epi_fusion_params_.tensor_bias[k]); } output = epi_activation(output); tensor_d_(c, s, r, t, k) = output_converter(output); } } } } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // cutlass::reference::host /////////////////////////////////////////////////////////////////////////////////////////////////
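// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original reference). The plain-C++ routine
// below mirrors the per-output-element arithmetic of the 1-D fprop
// specialization above without the cute::Tensor machinery: the same index map
// w = q * stride - pad + s * dilation, the same bounds test, and the same
// epilogue fusion output = alpha * acc + beta * C + bias. The function name,
// flat row-major buffers, scalar alpha/beta, and identity activation are all
// simplifying assumptions made for illustration only.

#include <vector>

inline void conv1d_fprop_scalar_sketch(
    std::vector<float> const& act,   // activation, [N, W, C] with C innermost
    std::vector<float> const& flt,   // filter,     [K, S, C] with C innermost
    std::vector<float> const& src,   // source C,   [N, Q, K] with K innermost
    std::vector<float>&       dst,   // output D,   [N, Q, K] with K innermost
    std::vector<float> const& bias,  // per-channel bias, [K]
    int N, int W, int C, int K, int S, int Q,
    int pad, int stride, int dilation,
    float alpha, float beta) {

  for (int n = 0; n < N; ++n) {
    for (int q = 0; q < Q; ++q) {
      for (int k = 0; k < K; ++k) {
        float acc = 0.f;
        for (int s = 0; s < S; ++s) {
          int w = q * stride - pad + s * dilation;
          if (w < 0 || w >= W) {
            continue;   // plays the role of detail::is_activation_in_bounds()
          }
          for (int c = 0; c < C; ++c) {
            acc += act[(n * W + w) * C + c] * flt[(k * S + s) * C + c];
          }
        }
        // Epilogue fusion with identity activation.
        dst[(n * Q + q) * K + k] = alpha * acc + beta * src[(n * Q + q) * K + k] + bias[k];
      }
    }
  }
}
// ---------------------------------------------------------------------------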
cutlass/tools/util/include/cutlass/util/reference/host/conv.hpp/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/host/conv.hpp", "repo_id": "cutlass", "token_count": 13401 }
63
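// ---------------------------------------------------------------------------
// Editorial note on the dgrad/wgrad specializations in the reference
// implementation above (conv.hpp): dgrad inverts the fprop index map
// w = q * stride - pad + s * dilation by solving for q and skipping filter
// taps for which the division is inexact or the result is out of range. The
// helper below is a stand-alone restatement of that test, included only for
// illustration; its name and signature are not part of CUTLASS.

#include <optional>

// Returns the fprop output coordinate q that reads input coordinate w through
// filter tap s, or std::nullopt if no such q exists.
inline std::optional<int> invert_conv_index(
    int w, int s, int pad, int stride, int dilation, int Q) {
  int q = w + pad - s * dilation;
  if (q % stride != 0) {
    return std::nullopt;   // the reference loops 'continue' in this case
  }
  q /= stride;
  if (q < 0 || q >= Q) {
    return std::nullopt;   // mirrors detail::is_activation_in_bounds()
  }
  return q;
}
// ---------------------------------------------------------------------------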
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Provides several functions for filling tensors with data. 
*/ #pragma once // Standard Library includes #include <utility> #include <cstdlib> #include <cmath> #include <random> #include <stdexcept> // Cutlass includes #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/quaternion.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/subbyte_reference.h" #include "cutlass/tensor_view.h" #include "cutlass/tensor_view_planar_complex.h" #include "cutlass/blas3.h" #include "cutlass/util/distribution.h" #include "tensor_foreach.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace reference { namespace host { /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; Element value; // // Methods // TensorFillFunc( TensorView const &view_ = TensorView(), Element value_ = Element(0) ): view(view_), value(value_) { } void operator()(Coord<Layout::kRank> const & coord) const { view.at(coord) = value; } }; /// Returns a pair of values of the Gaussian distribution generated by the Box Muller method struct BoxMullerFunc { BoxMullerFunc() {} void operator()( double* rnd, ///< Size-2 vector to be filled with random values double mean = 0, ///< Mean of the Gaussian distribution double stddev = 1, ///< Standard deviation of the Gaussian distribution double pi = std::acos(-1)) const { double u1 = double(std::rand()) / double(RAND_MAX); double u2 = double(std::rand()) / double(RAND_MAX); rnd[0] = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2); rnd[1] = std::sqrt(-2 * std::log(u1)) * std::sin(2 * pi * u2); rnd[0] = mean + stddev * rnd[0]; rnd[1] = mean + stddev * rnd[1]; } }; } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with a uniform value template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFill( TensorView<Element, Layout> dst, ///< destination tensor Element val = Element(0)) { ///< value to uniformly fill it with detail::TensorFillFunc<Element, Layout> func(dst, val); TensorForEach( dst.extent(), func ); } /// Fills a tensor with a uniform value template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFill( TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor cutlass::complex<Element> val = cutlass::complex<Element>(0)) { ///< value to uniformly fill it with TensorFill(dst.view_real(), val.real()); TensorFill(dst.view_imag(), val.imag()); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename Element> struct RandomGaussianFunc { uint64_t seed; double mean; double stddev; int int_scale; double pi; double pnz; // // Methods // RandomGaussianFunc( uint64_t seed_ = 0, double mean_ = 0, double stddev_ = 1, int int_scale_ = -1, double pnz_ = 100.0 ): seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) { std::srand((unsigned)seed); } /// Compute random value and update 
RNG state Element operator()() const { // Box-Muller transform to generate random numbers with Normal distribution double u1 = double(std::rand()) / double(RAND_MAX); double u2 = double(std::rand()) / double(RAND_MAX); // Compute Gaussian random value double rnd = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2); rnd = mean + stddev * rnd; // Scale and convert final result Element result; // Sample from the Bernoulli distribution, and use the result to sample from the Gaussian std::random_device rnd_device; std::mt19937 bernoulli_rnd(rnd_device()); std::bernoulli_distribution bernoulli_dist(pnz / 100); bool bernoulli_result = bernoulli_dist(bernoulli_rnd); // Sample from the Gaussian distribution for a nonzero element if (bernoulli_result) { if (int_scale >= 0) { rnd = double(std::llround(rnd * double(1 << int_scale))) / double(1 << int_scale); result = static_cast<Element>(rnd); } else { result = static_cast<Element>(rnd); } } else { result = static_cast<Element>(0); } return result; } }; /// Partial specialization for initializing a complex value. template <typename Element> struct RandomGaussianFunc<complex<Element> > { uint64_t seed; double mean; double stddev; int int_scale; double pi; double pnz; // // Methods // RandomGaussianFunc( uint64_t seed_ = 0, double mean_ = 0, double stddev_ = 1, int int_scale_ = -1, double pnz_ = 100.0 ): seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) { std::srand((unsigned)seed); } /// Compute random value and update RNG state complex<Element> operator()() const { Element reals[2]; double rnd[2]; detail::BoxMullerFunc func; func(rnd, mean, stddev, pi); // Sample from the Bernoulli distribution, and use the result to sample from the Gaussian std::random_device rnd_device; std::mt19937 bernoulli_rnd(rnd_device()); std::bernoulli_distribution bernoulli_dist(pnz / 100); bool bernoulli_result = bernoulli_dist(bernoulli_rnd); // Sample from the Gaussian distribution for a nonzero element if (bernoulli_result) { if (int_scale >= 0) { rnd[0] = double(int(rnd[0] * double(1 << int_scale))); rnd[1] = double(int(rnd[1] * double(1 << int_scale))); reals[0] = from_real<Element>(rnd[0] / double(1 << int_scale)); reals[1] = from_real<Element>(rnd[1] / double(1 << int_scale)); } else { reals[0] = from_real<Element>(rnd[0]); reals[1] = from_real<Element>(rnd[1]); } } else { reals[0] = from_real<Element>(0); reals[1] = from_real<Element>(0); } return complex<Element>(reals[0], reals[1]); } }; /// Partial specialization for initializing a complex value. 
template <typename Element> struct RandomGaussianFunc<Quaternion<Element> > { uint64_t seed; double mean; double stddev; int int_scale; double pi; double pnz; // // Methods // RandomGaussianFunc( uint64_t seed_ = 0, double mean_ = 0, double stddev_ = 1, int int_scale_ = -1, double pnz_ = 100.0 ): seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) { std::srand((unsigned)seed); } /// Compute random value and update RNG state Quaternion<Element> operator()() const { Element reals[4]; double rnd1[2]; double rnd2[2]; detail::BoxMullerFunc func; func(rnd1, mean, stddev, pi); func(rnd2, mean, stddev, pi); // Sample from the Bernoulli distribution, and use the result to sample from the Gaussian std::random_device rnd_device; std::mt19937 bernoulli_rnd(rnd_device()); std::bernoulli_distribution bernoulli_dist(pnz / 100); bool bernoulli_result = bernoulli_dist(bernoulli_rnd); // Sample from the Gaussian distribution for a nonzero element if (bernoulli_result) { if (int_scale >= 0) { rnd1[0] = double(int(rnd1[0] * double(1 << int_scale))); rnd1[1] = double(int(rnd1[1] * double(1 << int_scale))); rnd2[0] = double(int(rnd2[0] * double(1 << int_scale))); rnd2[1] = double(int(rnd2[1] * double(1 << int_scale))); reals[0] = from_real<Element>(rnd1[0] / double(1 << int_scale)); reals[1] = from_real<Element>(rnd1[1] / double(1 << int_scale)); reals[2] = from_real<Element>(rnd2[0] / double(1 << int_scale)); reals[3] = from_real<Element>(rnd2[1] / double(1 << int_scale)); } else { reals[0] = from_real<Element>(rnd1[0]); reals[1] = from_real<Element>(rnd1[1]); reals[2] = from_real<Element>(rnd2[0]); reals[3] = from_real<Element>(rnd2[1]); } } else { reals[0] = from_real<Element>(0); reals[1] = from_real<Element>(0); reals[2] = from_real<Element>(0); reals[3] = from_real<Element>(0); } return Quaternion<Element>(reals[0], reals[1], reals[2], reals[3]); } }; /// Computes a random Gaussian distribution template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillGaussianFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; RandomGaussianFunc<Element> func; // // Methods // /// Construction of Gaussian RNG functor. TensorFillGaussianFunc( TensorView view_ = TensorView(), RandomGaussianFunc<Element> func_ = RandomGaussianFunc<Element>() ): view(view_), func(func_) { } /// Compute random value and update RNG state void operator()(Coord<Layout::kRank> const &coord) const { view.at(coord) = func(); } }; /// Computes a random Gaussian distribution for a rank-2 tensor template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillSymmetricGaussianFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; RandomGaussianFunc<Element> func; cutlass::FillMode fill_mode; // // Methods // /// Construction of Gaussian RNG functor. 
TensorFillSymmetricGaussianFunc( TensorView view_ = TensorView(), RandomGaussianFunc<Element> func_ = RandomGaussianFunc<Element>(), cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid ): view(view_), func(func_), fill_mode(fill_mode_) { } /// Compute random value and update RNG state void operator()(Coord<Layout::kRank> const &coord) const { // Fill half of matrix based on FillMode if (Layout::kRank == 2 && fill_mode == cutlass::FillMode::kLower && coord[0] >= coord[1]) { view.at(coord) = func(); } else if (Layout::kRank == 2 && fill_mode == cutlass::FillMode::kUpper && coord[0] <= coord[1]) { view.at(coord) = func(); } } }; } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with random values with a Gaussian distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillRandomGaussian( TensorView<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG double mean = 0, ///< Gaussian distribution's mean double stddev = 1, ///< Gaussian distribution's standard deviation int bits = -1, ///< If non-negative, specifies number of fractional bits that double pnz = 100.0) { /// are not truncated to zero. Permits reducing precision of /// data. detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz); detail::TensorFillGaussianFunc<Element, Layout> func( dst, random_func ); TensorForEach( dst.extent(), func ); } /// Fills a tensor with random values with a Gaussian distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillRandomGaussian( TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG double mean = 0, ///< Gaussian distribution's mean double stddev = 1, ///< Gaussian distribution's standard deviation int bits = -1, ///< If non-negative, specifies number of fractional bits that double pnz = 100.0) { /// are not truncated to zero. Permits reducing precision of /// data. TensorFillRandomGaussian(dst.view_real(), seed, mean, stddev, bits, pnz); TensorFillRandomGaussian(dst.view_imag(), ~seed, mean, stddev, bits, pnz); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a Gaussian distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillSymmetricRandomGaussian( TensorView<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices double mean = 0, ///< Gaussian distribution's mean double stddev = 1, ///< Gaussian distribution's standard deviation int bits = -1, ///< If non-negative, specifies number of fractional bits that double pnz = 100.0) { /// are not truncated to zero. Permits reducing precision of /// data. detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz); detail::TensorFillSymmetricGaussianFunc<Element, Layout> func( dst, random_func, fill_mode ); TensorForEach( dst.extent(), func ); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with random values of a Gaussian distribution. 
template < typename Element ///< Element type > void BlockFillRandomGaussian( Element *ptr, ///< destination buffer size_t capacity, ///< number of elements uint64_t seed, ///< seed for RNG double mean = 0, ///< Gaussian distribution's mean double stddev = 1, ///< Gaussian distribution's standard deviation int bits = -1, ///< If non-negative, specifies number of fractional bits that double pnz = 100.0) { /// are not truncated to zero. Permits reducing precision of /// data. detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz); for (size_t i = 0; i < capacity; ++i) { ReferenceFactory<Element>::get(ptr, i) = random_func(); } } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename Element> struct RandomUniformFunc { using Real = typename RealType<Element>::Type; uint64_t seed; double range; double min; int int_scale; // // Methods // RandomUniformFunc( uint64_t seed_ = 0, double max = 1, double min_ = 0, int int_scale_ = -1 ): seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { std::srand((unsigned)seed); } /// Compute random value and update RNG state Element operator()() const { double rnd = double(std::rand()) / double(RAND_MAX); rnd = min + range * rnd; // Random values are cast to integer after scaling by a power of two to facilitate error // testing Element result; if (int_scale >= 0) { rnd = double(std::llround(rnd * double(1 << int_scale))) / double(1 << int_scale); result = static_cast<Element>(Real(rnd)); } else { result = static_cast<Element>(Real(rnd)); } return result; } }; /// Partial specialization for initializing a complex value. template <typename Element> struct RandomUniformFunc<complex<Element> > { using Real = typename RealType<Element>::Type; uint64_t seed; double range; double min; int int_scale; // // Methods // RandomUniformFunc( uint64_t seed_ = 0, double max = 1, double min_ = 0, int int_scale_ = -1 ): seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { std::srand((unsigned)seed); } /// Compute random value and update RNG state complex<Element> operator()() const { Element reals[2]; for (int i = 0; i < 2; ++i) { double rnd = double(std::rand()) / double(RAND_MAX); rnd = min + range * rnd; // Random values are cast to integer after scaling by a power of two to facilitate error // testing if (int_scale >= 0) { rnd = double(int(rnd * double(1 << int_scale))); reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale))); } else { reals[i] = from_real<Element>(Real(rnd)); } } return complex<Element>(reals[0], reals[1]); } }; /// Partial specialization for initializing a Quaternion value. 
template <typename Element> struct RandomUniformFunc<Quaternion<Element> > { using Real = typename RealType<Element>::Type; uint64_t seed; double range; double min; int int_scale; // // Methods // RandomUniformFunc( uint64_t seed_ = 0, double max = 1, double min_ = 0, int int_scale_ = -1 ): seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) { std::srand((unsigned)seed); } /// Compute random value and update RNG state Quaternion<Element> operator()() const { Element reals[4]; for (int i = 0; i < 4; ++i) { double rnd = double(std::rand()) / double(RAND_MAX); rnd = min + range * rnd; // Random values are cast to integer after scaling by a power of two to facilitate error // testing if (int_scale >= 0) { rnd = double(int(rnd * double(1 << int_scale))); reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale))); } else { reals[i] = from_real<Element>(Real(rnd)); } } return make_Quaternion(reals[0], reals[1], reals[2], reals[3]); } }; /// Computes a random uniform distribution template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillRandomUniformFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; RandomUniformFunc<Element> func; // // Methods // /// Construction of uniform RNG functor. TensorFillRandomUniformFunc( TensorView view_ = TensorView(), RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>() ): view(view_), func(func_) { } /// Compute random value and update RNG state void operator()(Coord<Layout::kRank> const &coord) const { view.at(coord) = func(); } }; /// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a uniform distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillSymmetricRandomUniformFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; RandomUniformFunc<Element> func; cutlass::FillMode fill_mode; // // Methods // /// Construction of uniform RNG functor. TensorFillSymmetricRandomUniformFunc( TensorView view_ = TensorView(), RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>(), cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid ): view(view_), func(func_), fill_mode(fill_mode_) { } /// Compute random value and update RNG state void operator()(Coord<Layout::kRank> const &coord) const { // Fill half of matrix based on FillMode if (Layout::kRank == 2 && fill_mode == cutlass::FillMode::kLower && coord[0] >= coord[1]) { view.at(coord) = func(); } else if (Layout::kRank == 2 && fill_mode == cutlass::FillMode::kUpper && coord[0] <= coord[1]) { view.at(coord) = func(); } } }; /// Computes a random Uniform distribution and pads diagonal with zeros template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillPadDiagonalRandomUniformFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; RandomUniformFunc<Element> func; cutlass::FillMode fill_mode; int alignment; // // Methods // /// Construction of uniform RNG functor. 
TensorFillPadDiagonalRandomUniformFunc( TensorView view_ = TensorView(), RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>(), cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid, int alignment_ = 1 ): view(view_), func(func_), fill_mode(fill_mode_), alignment(alignment_) { } /// Compute random value and update RNG state void operator()(Coord<Layout::kRank> const &coord) const { // Fill half of matrix based on FillMode if (Layout::kRank == 2 && (fill_mode == cutlass::FillMode::kLower) && (coord[0] >= coord[1]) || ((coord[1] - coord[0]) >= alignment)) { view.at(coord) = func(); } else if (Layout::kRank == 2 && fill_mode == cutlass::FillMode::kUpper && (coord[0] <= coord[1]) || ((coord[0] - coord[1]) >= alignment)) { view.at(coord) = func(); } } }; } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with random values of a uniform random distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillRandomUniform( TensorView<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG double max = 1, ///< upper bound of distribution double min = 0, ///< lower bound for distribution int bits = -1) { ///< If non-negative, specifies number of fractional bits that /// are not truncated to zero. Permits reducing precision of /// data. detail::RandomUniformFunc<Element> random_func(seed, max, min, bits); detail::TensorFillRandomUniformFunc<Element, Layout> func( dst, random_func ); TensorForEach( dst.extent(), func ); } /// Fills a tensor with random values of a uniform random distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillRandomUniform( TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG double max = 1, ///< upper bound of distribution double min = 0, ///< lower bound for distribution int bits = -1) { ///< If non-negative, specifies number of fractional bits that /// are not truncated to zero. Permits reducing precision of /// data. TensorFillRandomUniform(dst.view_real(), seed, max, min, bits); TensorFillRandomUniform(dst.view_imag(), ~seed, max, min, bits); } /// Fills a tensor with random values with a uniform random distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillRandomUniform( TensorView<Quaternion<Element>, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG double max = 1, ///< upper bound of distribution double min = 0, ///< lower bound for distribution int bits = -1) { ///< If non-negative, specifies number of fractional bits that /// are not truncated to zero. Permits reducing precision of /// data. detail::RandomUniformFunc<Quaternion<Element>> random_func(seed, max, min, bits); detail::TensorFillRandomUniformFunc<Quaternion<Element>, Layout> func( dst, random_func ); TensorForEach( dst.extent(), func ); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with random values with a uniform random distribution. 
template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillSymmetricRandomUniform( TensorView<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices double max = 1, ///< upper bound of distribution double min = 0, ///< lower bound for distribution int bits = -1) { ///< If non-negative, specifies number of fractional bits that /// are not truncated to zero. Permits reducing precision of /// data. detail::RandomUniformFunc<Element> random_func(seed, max, min, bits); detail::TensorFillSymmetricRandomUniformFunc<Element, Layout> func( dst, random_func, fill_mode ); TensorForEach( dst.extent(), func ); } /// Fills a tensor with random values with a uniform random distribution pads zeros along diagonal template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillPadDiagonalRandomUniform( TensorView<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices double max = 1, ///< upper bound of distribution double min = 0, ///< lower bound for distribution int bits = -1, ///< If non-negative, specifies number of fractional bits that /// are not truncated to zero. Permits reducing precision of /// data. int alignment = 1 ) { detail::RandomUniformFunc<Element> random_func(seed, max, min, bits); detail::TensorFillPadDiagonalRandomUniformFunc<Element, Layout> func( dst, random_func, fill_mode, alignment ); TensorForEach( dst.extent(), func ); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with a uniform value template < typename Element ///< Element type > void BlockFill( Element *ptr, size_t capacity, Element val ) { for (size_t i = 0; i < capacity; ++i) { ReferenceFactory<Element>::get(ptr, i) = val; } } /// Fills a tensor with random values with a uniform random distribution. template < typename Element ///< Element type > void BlockFillRandomUniform( Element *ptr, size_t capacity, uint64_t seed, ///< seed for RNG double max = 1, ///< upper bound of distribution double min = 0, ///< lower bound for distribution int bits = -1) { ///< If non-negative, specifies number of fractional bits that /// are not truncated to zero. Permits reducing precision of /// data. detail::RandomUniformFunc<Element> random_func(seed, max, min, bits); for (size_t i = 0; i < capacity; ++i) { ReferenceFactory<Element>::get(ptr, i) = random_func(); } } /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillDiagonalFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; Element diag; Element other; // // Methods // TensorFillDiagonalFunc( TensorView const &view_ = TensorView(), Element diag_ = Element(1), Element other_ = Element(0) ): view(view_), diag(diag_), other(other_) { } void operator()(Coord<Layout::kRank> const & coord) const { bool is_diag = true; CUTLASS_PRAGMA_UNROLL for (int i = 1; i < Layout::kRank; ++i) { if (coord[i] != coord[i - 1]) { is_diag = false; break; } } view.at(coord) = (is_diag ? 
diag : other); } }; } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor everywhere with a unique value for its diagonal. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillDiagonal( TensorView<Element, Layout> dst, ///< destination tensor Element diag = Element(1), ///< value to write in the diagonal Element other = Element(0)) { ///< value to write off the diagonal detail::TensorFillDiagonalFunc<Element, Layout> func( dst, diag, other ); TensorForEach( dst.extent(), func ); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to fill a tensor's diagonal with 1 and 0 everywhere else. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillIdentity( TensorView<Element, Layout> dst) { ///< destination tensor TensorFillDiagonal(dst, Element(1), Element(0)); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Writes a uniform value to the diagonal of a tensor without modifying off-diagonal elements. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorUpdateDiagonal( TensorView<Element, Layout> dst, ///< destination tensor Element val = Element(1)) { typename Layout::Index extent = dst.extent().min(); for (typename Layout::Index i = 0; i < extent; ++i) { Coord<Layout::kRank> coord(i); dst.at(coord) = val; } } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorUpdateOffDiagonalFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; Element other; // // Methods // TensorUpdateOffDiagonalFunc( TensorView const &view_ = TensorView(), Element other_ = Element(0) ): view(view_), other(other_) { } void operator()(Coord<Layout::kRank> const & coord) const { bool is_diag = true; CUTLASS_PRAGMA_UNROLL for (int i = 1; i < Layout::kRank; ++i) { if (coord[i] != coord[i - 1]) { is_diag = false; break; } } if (!is_diag) { view.at(coord) = other; } } }; } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// /// Writes a uniform value to all elements in the tensor without modifying diagonal elements. 
template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorUpdateOffDiagonal( TensorView<Element, Layout> dst, ///< destination tensor Element other = Element(1)) { detail::TensorUpdateOffDiagonalFunc<Element, Layout> func( dst, other ); TensorForEach( dst.extent(), func ); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillLinearFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; Array<Element, Layout::kRank> v; Element s; // // Methods // TensorFillLinearFunc() { } /// Constructs functor TensorFillLinearFunc( TensorView const &view_, Array<Element, Layout::kRank> const & v_, Element s_ = Element(0) ): view(view_), v(v_), s(s_) { } /// Updates the tensor void operator()(Coord<Layout::kRank> const & coord) const { Element sum(s); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank; ++i) { sum += Element(coord[i]) * v[i]; } view.at(coord) = sum; } }; } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills tensor with a linear combination of its coordinate and another vector template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillLinear( TensorView<Element, Layout> dst, ///< destination tensor Array<Element, Layout::kRank> const & v, Element s = Element(0)) { detail::TensorFillLinearFunc<Element, Layout> func( dst, v, s ); TensorForEach( dst.extent(), func ); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills tensor with a linear combination of its coordinate and another vector template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillSequential( TensorView<Element, Layout> dst, ///< destination tensor Element s = Element(0)) { Array<Element, Layout::kRank> stride; stride[0] = Element(1); CUTLASS_PRAGMA_UNROLL for (int i = 1; i < Layout::kRank; ++i) { stride[i] = stride[i - 1] * Element(dst.extent()[i - 1]); } TensorFillLinear(dst, stride, s); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with random values from a distribution. 
template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillRandom( TensorView<Element, Layout> view, ///< destination tensor uint64_t seed, Distribution dist) { using Real = typename RealType<Element>::Type; if (dist.kind == Distribution::Gaussian) { TensorFillRandomGaussian( view, seed, dist.gaussian.mean, dist.gaussian.stddev, dist.int_scale); } else if (dist.kind == Distribution::Uniform) { TensorFillRandomUniform( view, seed, dist.uniform.max, dist.uniform.min, dist.int_scale); } } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a block of data with sequential elements template < typename Element > void BlockFillSequential( Element *ptr, int64_t capacity, Element v = Element(1), Element s = Element(0)) { int i = 0; while (i < capacity) { cutlass::ReferenceFactory<Element, (cutlass::sizeof_bits<Element>::value < 8)>::get(ptr, i) = s; s = Element(s + v); ++i; } } /// Fills a block of data with sequential elements template < typename Element > void BlockFillSequentialModN( Element *ptr, int64_t capacity, int64_t mod, int64_t v = int64_t(1), int64_t s = int64_t(0)) { int i = 0; while (i < capacity) { cutlass::ReferenceFactory<Element, (cutlass::sizeof_bits<Element>::value < 8)>::get(ptr, i) = Element(s); s = int64_t(s + v) % mod; ++i; } } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a block of data with sequential elements template < typename Element > void BlockFillRandom( Element *ptr, size_t capacity, uint64_t seed, Distribution dist) { if (dist.kind == Distribution::Gaussian) { BlockFillRandomGaussian<Element>( ptr, capacity, seed, dist.gaussian.mean, dist.gaussian.stddev, dist.int_scale, dist.gaussian.pnz); } else if (dist.kind == Distribution::Uniform) { BlockFillRandomUniform<Element>( ptr, capacity, seed, dist.uniform.max, dist.uniform.min, dist.int_scale); } } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename Element> struct RandomSparseMetaFunc { uint64_t seed; int range; int MetaSizeInBits; // // Methods // RandomSparseMetaFunc( uint64_t seed_ = 0, int MetaSizeInBits_ = 2 ): seed(seed_), MetaSizeInBits(MetaSizeInBits_) { std::srand((unsigned)seed); if (MetaSizeInBits_ == 2) { range = 6; } else if (MetaSizeInBits_ == 4) { range = 2; } else { throw std::invalid_argument("Invalid MetaSizeInBits"); } } /// Compute random value and update RNG state Element operator()() const { Element FourToTwoMeta[6] = {0x4, 0x8, 0x9, 0xc, 0xd, 0xe}; Element TwoToOneMeta[2] = {0x4, 0xe}; Element * MetaArray = (MetaSizeInBits == 2) ? 
FourToTwoMeta : TwoToOneMeta; Element result = 0x0; for (int i = 0; i < cutlass::sizeof_bits<Element>::value / 4; ++i) { int rnd = std::rand() % range; Element meta = MetaArray[rnd]; result = (Element)(result | ((Element)(meta << (i * 4)))); } return result; } }; /// Computes a random sparse meta template < typename Element, ///< Element type typename Layout> ///< Layout function struct TensorFillRandomSparseMetaFunc { using TensorView = TensorView<Element, Layout>; // // Data members // TensorView view; RandomSparseMetaFunc<Element> func; // // Methods // /// Construction of Gaussian RNG functor. TensorFillRandomSparseMetaFunc( TensorView view_ = TensorView(), RandomSparseMetaFunc<Element> func_ = RandomSparseMetaFunc<Element>() ): view(view_), func(func_) { } /// Compute random value and update RNG state void operator()(Coord<Layout::kRank> const &coord) const { view.at(coord) = func(); } }; } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with random values with a uniform random distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillRandomSparseMeta( TensorView<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG int MetaSizeInBits) { ///< 2 bit or 4 bit detail::RandomSparseMetaFunc<Element> random_func(seed, MetaSizeInBits); detail::TensorFillRandomSparseMetaFunc<Element, Layout> func( dst, random_func ); TensorForEach( dst.extent(), func ); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a tensor with random values with a uniform random distribution. template < typename Element ///< Element type > void BlockFillRandomSparseMeta( Element *ptr, size_t capacity, uint64_t seed, ///< seed for RNG int MetaSizeInBits) { ///< 2 bit or 4bit detail::RandomSparseMetaFunc<Element> random_func(seed, MetaSizeInBits); for (size_t i = 0; i < capacity; ++i) { ptr[i] = random_func(); } } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Fills a ell block index matrix with random values with a uniform random distribution. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorFillRandomEllIdx( TensorView<Element, Layout> dst, ///< destination tensor uint64_t seed, ///< seed for RNG int rows, int ell_cols, int cols) { ///< dimension of the matrix std::srand((unsigned)seed); for (int i = 0; i < rows; ++i) { int col_idx = std::rand() % cols; for (int j = 0; j < ell_cols; ++j) { dst.at({i, j}) = col_idx; if (col_idx != -1) { if (col_idx == (cols - 1)) { col_idx = -1; } else { col_idx = std::rand() % (cols - col_idx - 1) + col_idx + 1; } } } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Copies a diagonal in from host memory without modifying off-diagonal elements. 
template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorCopyDiagonalIn( TensorView<Element, Layout> dst, ///< destination tensor Element const *ptr) { ///< dense buffer of elements typename Layout::Index extent = dst.extent().min(); for (typename Layout::Index i = 0; i < extent; ++i) { Coord<Layout::kRank> coord(i); dst.at(coord) = ReferenceFactory<Element>::get(ptr, i); } } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Copies the diagonal of a tensor into a dense buffer in host memory. template < typename Element, ///< Element type typename Layout> ///< Layout function void TensorCopyDiagonalOut( Element *ptr, ///< dense buffer of elements TensorView<Element, Layout> src) { ///< source tensor typename Layout::Index extent = src.extent().min(); for (typename Layout::Index i = 0; i < extent; ++i) { Coord<Layout::kRank> coord(i); ReferenceFactory<Element>::get(ptr, i) = src.at(coord); } } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace host } // namespace reference } // namespace cutlass
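//
// Illustrative addendum (not part of tensor_fill.h): a minimal host-side usage sketch of the
// fill routines defined above. The tensor extent, seed, bounds, and the main() wrapper are
// arbitrary example values and assumptions; only the cutlass::reference::host entry points and
// cutlass::HostTensor come from the CUTLASS utility headers themselves.
//
#include <cstdint>
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"

int main() {
  // Allocate a 128x64 row-major tensor in host (and mirrored device) memory.
  cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor(cutlass::MatrixCoord(128, 64));

  // Fill with uniformly distributed random values in [-4, 4]; bits = -1 keeps full precision.
  uint64_t seed = 2024;
  cutlass::reference::host::TensorFillRandomUniform(tensor.host_view(), seed, 4.0, -4.0, -1);

  // Overwrite with the identity pattern: 1 on the diagonal, 0 elsewhere.
  cutlass::reference::host::TensorFillIdentity(tensor.host_view());

  // Or fill the raw allocation with the sequential ramp 0, 1, 2, ...
  cutlass::reference::host::BlockFillSequential(tensor.host_data(), tensor.capacity());

  return 0;
}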
cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.h", "repo_id": "cutlass", "token_count": 17993 }
64
var searchData= [ ['noexcept',['noexcept',['../platform_8h.html#a189faadd7f99f6c354db09acbb2aafcd',1,'platform.h']]], ['nullptr',['nullptr',['../platform_8h.html#ab979d9d4b4923f7c54d6caa6e1a61936',1,'platform.h']]] ];
cutlass/docs/search/defines_2.js/0
{ "file_path": "cutlass/docs/search/defines_2.js", "repo_id": "cutlass", "token_count": 109 }
0
var searchData= [ ['numerictypeid',['NumericTypeID',['../namespacecutlass_1_1library.html#a366ecc865ac5b24cfdfd392199ba8e9e',1,'cutlass::library']]] ];
cutlass/docs/search/enums_6.js/0
{ "file_path": "cutlass/docs/search/enums_6.js", "repo_id": "cutlass", "token_count": 68 }
1
var searchData= [ ['uniform',['Uniform',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92aa0fad91cf4fcbc8ab015053bea77090a6',1,'cutlass::Distribution']]] ];
cutlass/docs/search/enumvalues_5.js/0
{ "file_path": "cutlass/docs/search/enumvalues_5.js", "repo_id": "cutlass", "token_count": 82 }
2
var searchData= [ ['command_5fline_2eh',['command_line.h',['../command__line_8h.html',1,'']]], ['complex_2eh',['complex.h',['../complex_8h.html',1,'']]], ['conversion_5fop_2eh',['conversion_op.h',['../conversion__op_8h.html',1,'']]], ['coord_2eh',['coord.h',['../coord_8h.html',1,'']]], ['core_5fio_2eh',['core_io.h',['../core__io_8h.html',1,'']]], ['cutlass_2eh',['cutlass.h',['../cutlass_8h.html',1,'']]] ];
cutlass/docs/search/files_2.js/0
{ "file_path": "cutlass/docs/search/files_2.js", "repo_id": "cutlass", "token_count": 195 }
3
var searchData= [ ['layout_2eh',['layout.h',['../layout_8h.html',1,'']]], ['library_2eh',['library.h',['../library_8h.html',1,'']]], ['linear_5fcombination_2eh',['linear_combination.h',['../linear__combination_8h.html',1,'']]], ['linear_5fcombination_5fclamp_2eh',['linear_combination_clamp.h',['../linear__combination__clamp_8h.html',1,'']]], ['linear_5fcombination_5frelu_2eh',['linear_combination_relu.h',['../linear__combination__relu_8h.html',1,'']]], ['matrix_2eh',['matrix.h',['../layout_2matrix_8h.html',1,'']]] ];
cutlass/docs/search/files_a.js/0
{ "file_path": "cutlass/docs/search/files_a.js", "repo_id": "cutlass", "token_count": 225 }
4
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/transform/threadblock/predicated_vector_access_iterator.h" #include "cutlass/transform/threadblock/vector_iterator.h" #include "cutlass/transform/warp/vector_fragment_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "kernel/default_b2b_conv2d_fprop.h" #include "kernel/b2b_implicit_gemm_convolution.h" #include "threadblock/b2b_implicit_gemm_pipelined_smem_accumulator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm /// and 2 stage pipeline. /// Accumulator will be staged in shared memory. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, typename MathOperatorTag > struct DefaultB2bConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, true > { // Define the core components from GEMM using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA; using IteratorA0 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>, ElementA, LayoutA, ThreadMapA0 > >; using SmemIteratorA0 = typename MmaCore0::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB; using IteratorB0 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>, ElementB, LayoutB, ThreadMapB0 > >; using SmemIteratorB0 = typename MmaCore0::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter static int const kElementsPerAccess = 2; using IteratorAccumulatorScaleBias = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>, cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>, ElementScaleBias, LayoutScaleBias, kElementsPerAccess> >; // Define iterators over tiles from the B operand using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB; using IteratorB1 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>, ElementB, LayoutB, ThreadMapB1 > >; using SmemIteratorB1 = typename MmaCore1::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp; using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp; using MmaPolicy0 = typename MmaCore0::MmaPolicy; using MmaPolicy1 = typename MmaCore1::MmaPolicy; // Use fragment iterator for the accumulator using SmemAccumulatorLayout = cutlass::layout::RowMajor; using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp< WarpShape0, 
InstructionShape, ElementAccumulator, typename WarpMmaTensorOp0::Policy::Operator::FragmentC, SmemAccumulatorLayout >; // Store Accumulator tiles to Shared Memory using SmemIteratorD0 = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape0, InstructionShape, ElementC, SmemAccumulatorLayout >; static int const kThreadCount = 32; // load warp tile from Shared Memory accumulator using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIterator< MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA, ElementA, SmemAccumulatorLayout, MatrixShape<InstructionShape::kM, InstructionShape::kK>, WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>; // Define the Mma using B2bMma = threadblock::B2bImplicitGemmPipelinedSmemAccumulator< ThreadblockShape0, IteratorA0, SmemIteratorA0, IteratorB0, SmemIteratorB0, IteratorAccumulatorScaleBias, FragmentIteratorAccumulator, SmemIteratorD0, ThreadblockShape1, WarpIteratorA1, IteratorB1, SmemIteratorB1, ElementC, LayoutC, EpilogueOutputOp0, MmaPolicy0, MmaPolicy1 >; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape1, WarpMmaTensorOp1, 1, EpilogueOutputOp1 >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution< B2bMma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and 2 stage /// pipeline with interleaved layout. /// Accumulator will be staged in shared memory. template < typename ElementA, typename ElementB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, typename MathOperatorTag, int InterleavedK > struct DefaultB2bConv2dFprop < ElementA, layout::TensorNCxHWx<InterleavedK>, ElementB, layout::TensorCxRSKx<InterleavedK>, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, true > { // Define the core components from GEMM using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, 2, MathOperatorTag, true>; using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, 2, MathOperatorTag, true>; // Define iterators over tiles from the A operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. 
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA; using IteratorA0 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>, ElementA, layout::TensorNCxHWx<InterleavedK>, ThreadMapA0 > >; using SmemIteratorA0 = typename MmaCore0::SmemIteratorA; // Define iterators over tiles from the B operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. using ThreadMapB0 = typename MmaCore0::SmemThreadMapB; using IteratorB0 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB0 > >; using SmemIteratorB0 = typename MmaCore0::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter static int const kElementsPerAccess = 4; //For interleaved layout using IteratorAccumulatorScaleBias = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>, cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>, ElementScaleBias, LayoutScaleBias, kElementsPerAccess> >; // Define iterators over tiles from the B operand using ThreadMapB1 = typename MmaCore1::SmemThreadMapB; using IteratorB1 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB1 > >; using SmemIteratorB1 = typename MmaCore1::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp; using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp; using MmaPolicy0 = typename MmaCore0::MmaPolicy; using MmaPolicy1 = typename MmaCore1::MmaPolicy; // Use fragment iterator for the accumulator using SmemAccumulatorLayout = cutlass::layout::ColumnMajorInterleaved<16>; using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp< WarpShape0, InstructionShape, ElementAccumulator, typename WarpMmaTensorOp0::Policy::Operator::FragmentC, SmemAccumulatorLayout >; // Store Accumulator tiles to Shared Memory using SmemIteratorD0 = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape0, InstructionShape, ElementC, SmemAccumulatorLayout >; static int const kThreadCount = 32; // load warp tile from Shared Memory accumulator using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIteratorCanonical< MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA, ElementA, SmemAccumulatorLayout, MatrixShape<InstructionShape::kM, InstructionShape::kK>, WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>; // Define the Mma using B2bMma = threadblock::B2bImplicitGemmPipelinedSmemAccumulator< ThreadblockShape0, IteratorA0, SmemIteratorA0, IteratorB0, SmemIteratorB0, IteratorAccumulatorScaleBias, FragmentIteratorAccumulator, SmemIteratorD0, 
ThreadblockShape1, WarpIteratorA1, IteratorB1, SmemIteratorB1, ElementC, LayoutC, EpilogueOutputOp0, MmaPolicy0, MmaPolicy1 >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue< ThreadblockShape1, WarpMmaTensorOp1, 1, EpilogueOutputOp1, EpilogueOutputOp1::kCount, InterleavedK >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution< B2bMma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm /// and 2 stage pipeline. /// Accumulator will be staged in shared memory. template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, typename MathOperatorTag > struct DefaultB2bConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, true > { // Define the core components from GEMM using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA; using IteratorA0 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>, ElementA, LayoutA, ThreadMapA0 > >; using SmemIteratorA0 = typename MmaCore0::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB; using IteratorB0 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>, ElementB, LayoutB, ThreadMapB0 > >; using SmemIteratorB0 = typename MmaCore0::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter static int const kElementsPerAccess = 2; using IteratorAccumulatorScaleBias = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>, cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>, ElementScaleBias, LayoutScaleBias, kElementsPerAccess> >; // Define iterators over tiles from the B operand using ThreadMapB1 = typename 
MmaCore1::IteratorThreadMapB; using IteratorB1 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>, ElementB, LayoutB, ThreadMapB1 > >; using SmemIteratorB1 = typename MmaCore1::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp; using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp; using MmaPolicy0 = typename MmaCore0::MmaPolicy; using MmaPolicy1 = typename MmaCore1::MmaPolicy; // Use fragment iterator for the accumulator using SmemAccumulatorLayout = cutlass::layout::RowMajor; using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp< WarpShape0, InstructionShape, ElementAccumulator, typename WarpMmaTensorOp0::Policy::Operator::FragmentC, SmemAccumulatorLayout >; // Store Accumulator tiles to Shared Memory using SmemIteratorD0 = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape0, InstructionShape, ElementC, SmemAccumulatorLayout >; static int const kThreadCount = 32; // load warp tile from Shared Memory accumulator using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIterator< MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA, ElementA, SmemAccumulatorLayout, MatrixShape<InstructionShape::kM, InstructionShape::kK>, WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>; // Define the Mma using B2bMma = threadblock::B2bImplicitGemmPipelinedSmemAccumulator< ThreadblockShape0, IteratorA0, SmemIteratorA0, IteratorB0, SmemIteratorB0, IteratorAccumulatorScaleBias, FragmentIteratorAccumulator, SmemIteratorD0, ThreadblockShape1, WarpIteratorA1, IteratorB1, SmemIteratorB1, ElementC, LayoutC, EpilogueOutputOp0, MmaPolicy0, MmaPolicy1 >; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape1, WarpMmaTensorOp1, 1, EpilogueOutputOp1 >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution< B2bMma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and 2 stage /// pipeline with interleaved layout. /// Accumulator will be staged in shared memory. 
template < typename ElementA, typename ElementB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, typename MathOperatorTag, int InterleavedK > struct DefaultB2bConv2dFprop < ElementA, layout::TensorNCxHWx<InterleavedK>, ElementB, layout::TensorCxRSKx<InterleavedK>, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, true > { // Define the core components from GEMM using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, 2, MathOperatorTag, true>; using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, 2, MathOperatorTag, true>; // Define iterators over tiles from the A operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. // Define iterators over tiles from the A operand using ThreadMapA0 = typename MmaCore0::SmemThreadMapA; using IteratorA0 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>, ElementA, layout::TensorNCxHWx<InterleavedK>, ThreadMapA0 > >; using SmemIteratorA0 = typename MmaCore0::SmemIteratorA; // Define iterators over tiles from the B operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. 
using ThreadMapB0 = typename MmaCore0::SmemThreadMapB; using IteratorB0 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB0 > >; using SmemIteratorB0 = typename MmaCore0::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter static int const kElementsPerAccess = 4; //For interleaved layout using IteratorAccumulatorScaleBias = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>, cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>, ElementScaleBias, LayoutScaleBias, kElementsPerAccess> >; using ThreadMapB1 = typename MmaCore1::SmemThreadMapB; using IteratorB1 = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB1 > >; using SmemIteratorB1 = typename MmaCore1::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp; using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp; using MmaPolicy0 = typename MmaCore0::MmaPolicy; using MmaPolicy1 = typename MmaCore1::MmaPolicy; // Use fragment iterator for the accumulator using SmemAccumulatorLayout = cutlass::layout::ColumnMajorInterleaved<16>; using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp< WarpShape0, InstructionShape, ElementAccumulator, typename WarpMmaTensorOp0::Policy::Operator::FragmentC, SmemAccumulatorLayout >; // Store Accumulator tiles to Shared Memory using SmemIteratorD0 = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape0, InstructionShape, ElementC, SmemAccumulatorLayout >; static int const kThreadCount = 32; // load warp tile from Shared Memory accumulator using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIteratorCanonical< MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA, ElementA, SmemAccumulatorLayout, MatrixShape<InstructionShape::kM, InstructionShape::kK>, WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>; // Define the Mma using B2bMma = threadblock::B2bImplicitGemmPipelinedSmemAccumulator< ThreadblockShape0, IteratorA0, SmemIteratorA0, IteratorB0, SmemIteratorB0, IteratorAccumulatorScaleBias, FragmentIteratorAccumulator, SmemIteratorD0, ThreadblockShape1, WarpIteratorA1, IteratorB1, SmemIteratorB1, ElementC, LayoutC, EpilogueOutputOp0, MmaPolicy0, MmaPolicy1 >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue< ThreadblockShape1, WarpMmaTensorOp1, 1, EpilogueOutputOp1, EpilogueOutputOp1::kCount, InterleavedK >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution< B2bMma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
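//
// Illustrative addendum (not part of this header): a small, self-contained host reference of the
// back-to-back computation these kernel compositions implement, written in the implicit-GEMM view.
// The function name and the per-column scale/bias followed by ReLU are assumptions standing in for
// EpilogueOutputOp0; the point is the data flow, in which the intermediate D0 feeds the second
// GEMM directly (in the fused kernels above it is staged in shared memory and never written to
// global memory).
//
#include <algorithm>
#include <cstddef>
#include <vector>

// Reference: Accum0 = A (M x K0) * B0 (K0 x N0)
//            D0     = ReLU(scale[n] * Accum0[m][n] + bias[n])   // stand-in for EpilogueOutputOp0
//            D1     = D0 (M x N0) * B1 (N0 x N1)
void b2b_gemm_reference(
    int M, int K0, int N0, int N1,
    std::vector<float> const &A, std::vector<float> const &B0,
    std::vector<float> const &scale, std::vector<float> const &bias,
    std::vector<float> const &B1, std::vector<float> &D1) {

  // First GEMM plus the fused per-column epilogue.
  std::vector<float> D0(static_cast<size_t>(M) * N0);
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N0; ++n) {
      float acc = 0.0f;
      for (int k = 0; k < K0; ++k) {
        acc += A[static_cast<size_t>(m) * K0 + k] * B0[static_cast<size_t>(k) * N0 + n];
      }
      D0[static_cast<size_t>(m) * N0 + n] = std::max(scale[n] * acc + bias[n], 0.0f);
    }
  }

  // Second GEMM consumes the intermediate result directly.
  D1.assign(static_cast<size_t>(M) * N1, 0.0f);
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N1; ++n) {
      float acc = 0.0f;
      for (int k = 0; k < N0; ++k) {
        acc += D0[static_cast<size_t>(m) * N0 + k] * B1[static_cast<size_t>(k) * N1 + n];
      }
      D1[static_cast<size_t>(m) * N1 + n] = acc;
    }
  }
}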
cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_smem_accumulator_sm75.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_smem_accumulator_sm75.h", "repo_id": "cutlass", "token_count": 10064 }
5
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped Back-to-back fused GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/aligned_buffer.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "threadblock/b2b_mma_base_smem_accumulator.h" #include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape0_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) typename IteratorA0_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA0_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) typename IteratorB0_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB0_, /// Iterates over vectors of scale and bias vector in global memory // (concept: VectorIterator) typename IteratorAccumulatorScaleBias_, /// Iterates over accumulator tile typename FragmentIteratorAccumulator_, /// Iterates over accumulator tile in shared memory typename SmemIteratorD0_, /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape1_, /// Iterates over the intermediate accumulator tile in shared memory typename WarpIteratorA1_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) typename IteratorB1_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB1_, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...) typename OutputOp_, /// Policy describing tuning details (concept: MmaPipelinedPolicy) typename Policy0_, /// Policy describing tuning details (concept: MmaPipelinedPolicy) typename Policy1_, /// Transformation applied to A0 operand typename TransformA0_ = NumericArrayConverter< typename SmemIteratorA0_::Element, typename IteratorA0_::Element, IteratorA0_::Fragment::kElements>, /// /// Transformation applied to B0 operand typename TransformB0_ = NumericArrayConverter< typename SmemIteratorB0_::Element, typename IteratorB0_::Element, IteratorB0_::Fragment::kElements>, /// /// Transformation applied to B1 operand typename TransformB1_ = NumericArrayConverter< typename SmemIteratorB1_::Element, typename IteratorB1_::Element, IteratorB1_::Fragment::kElements>, /// Used for partial specialization typename Enable = bool > class B2bMmaPipelinedSmemAccumulator : public B2bMmaBaseSmemAccumulator<Shape0_, Shape1_, Policy0_, Policy1_, SmemIteratorD0_, 2> { public: ///< Base class using Base = B2bMmaBaseSmemAccumulator<Shape0_, Shape1_, Policy0_, Policy1_, SmemIteratorD0_, 2>; using Shape0 = Shape0_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using IteratorA0 = IteratorA0_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA0; using IteratorB0 = IteratorB0_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB0; using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_; ///< Iterates over tiles of the scale and bias vectors in global memory using Policy0 = Policy0_; ///< Policy0 describing tuning details using SmemIteratorA0 = SmemIteratorA0_; using SmemIteratorB0 = SmemIteratorB0_; using SmemIteratorD0 = SmemIteratorD0_; ///< Iterates over accumulator tile in shared memory using FragmentIteratorAccumulator = FragmentIteratorAccumulator_; ///< Iterates over accumulator tile using Shape1 = Shape1_; ///< 
Size of the Gemm problem - concept: gemm::GemmShape<> using IteratorB1 = IteratorB1_; ///< Iterates over tiles of B operand in global memory using Policy1 = Policy1_; ///< Policy1 describing tuning details using Policy = Policy1; ///< Export Policy1 as the threadblock-level Mma's policy using Shape = Shape1; using SmemIteratorB1 = SmemIteratorB1_; using WarpIteratorA1 = WarpIteratorA1_; ///< Iterates over the intermediate accumulator tile in shared memory using ElementC = ElementC_; ///< Data type of accumulator matrix using LayoutC = LayoutC_; ///< Layout of accumulator matrix using OutputOp = OutputOp_; ///< Epilogue after 1st Gemm using TransformA0 = TransformA0_; using TransformB0 = TransformB0_; using TransformB1 = TransformB1_; // // Dependent types // /// Fragment of operand A loaded from global memory using FragmentA0 = typename IteratorA0::Fragment; /// Fragment of operand B loaded from global memory using FragmentB0 = typename IteratorB0::Fragment; /// Fragment of accumulator tile using FragmentC0 = typename Policy0::Operator::FragmentC; /// Warp-level Mma using Operator0 = typename Policy0::Operator; /// Fragment of operand B loaded from global memory using FragmentB1 = typename IteratorB1::Fragment; /// Fragment of accumulator tile using FragmentC1 = typename Policy1::Operator::FragmentC; /// Warp-level Mma using Operator1 = typename Policy1::Operator; /// Obtain the arch tag from the warp-level operator using ArchTag = typename Policy0::Operator::ArchTag; /// Complex transform on A0 operand static ComplexTransform const kTransformA0 = Operator0::kTransformA; /// Complex transform on B0 operand static ComplexTransform const kTransformB0 = Operator0::kTransformB; /// Complex transform on B1 operand static ComplexTransform const kTransformB1 = Operator1::kTransformB; /// Complex transform exports needed by higher-level kernels static ComplexTransform const kTransformA = kTransformA0; static ComplexTransform const kTransformB = kTransformB0; /// staticaly assert kStages for MmaPipelined is two (Double-buffered pipeline) static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2"); /// Epilog in shared memory using Epilogue0 = epilogue::threadblock::EpilogueSmemAccumulator< SmemIteratorD0, ///< SmemTileIterator FragmentIteratorAccumulator, ///< AccumulatorFragmentIterator IteratorAccumulatorScaleBias, ///< ScaleBiasIterator OutputOp>; ///< Output operator private: using WarpFragmentA0 = typename Operator0::FragmentA; using WarpFragmentB0 = typename Operator0::FragmentB; using WarpFragmentA1 = typename Operator1::FragmentA; using WarpFragmentB1 = typename Operator1::FragmentB; protected: /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA0 smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B0 operand to shared memory SmemIteratorB0 smem_iterator_B0_; /// Shared Memory Iterator to store accumulator tile SmemIteratorD0 smem_iterator_D0_; /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile WarpIteratorA1 warp_tile_iterator_A1_; /// Iterator to write threadblock-scoped tile of B1 operand to shared memory SmemIteratorB1 smem_iterator_B1_; public: /// Construct from tensor references CUTLASS_DEVICE B2bMmaPipelinedSmemAccumulator( typename Base::B2bMmaSharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM int thread_idx, ///< ID within the threadblock int warp_idx, ///< ID of warp int lane_idx, ///< ID of each thread within a warp 
int problem_size_0_n ///< GEMM0 N is used for accumulator extent ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.b2b_mma_shared_storage.shared_storage0.operand_A_ref(), thread_idx), smem_iterator_B0_(shared_storage.b2b_mma_shared_storage.shared_storage0.operand_B_ref(), thread_idx), smem_iterator_D0_(shared_storage.accumulator_shared_storage0.accum_ref(), lane_idx), warp_tile_iterator_A1_(shared_storage.accumulator_shared_storage0.accum_ref(), {Base::WarpGemm1::kM, problem_size_0_n}, lane_idx), smem_iterator_B1_(shared_storage.b2b_mma_shared_storage.shared_storage1.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn_0 = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_k_0 = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_m_0 = warp_idx_mn_0 % Base::WarpCount0::kM; int warp_idx_n_0 = warp_idx_mn_0 / Base::WarpCount0::kM; int tile_offset_k_0 = Base::kWarpGemmIterations0 * warp_idx_k_0; int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN); int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN); int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM; int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM; int tile_offset_k_1 = Base::kWarpGemmIterations1 * warp_idx_k_1; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A0_.add_tile_offset({warp_idx_m_0, tile_offset_k_0}); this->warp_tile_iterator_B0_.add_tile_offset({tile_offset_k_0, warp_idx_n_0}); warp_tile_iterator_A1_.add_tile_offset({warp_idx_m_1, tile_offset_k_1}); this->warp_tile_iterator_B1_.add_tile_offset({tile_offset_k_1, warp_idx_n_1}); // Add smem accumulator iterator warp offset smem_iterator_D0_.add_tile_offset({ warp_idx_m_0 * SmemIteratorD0::TileIterations::kRow, warp_idx_n_0 * SmemIteratorD0::TileIterations::kColumn}); } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( int gemm_k_iterations_0, ///< number of iterations of the mainloop FragmentC1 &accum, ///< destination accumulator tile IteratorA0 iterator_A, ///< iterator over A operand in global memory IteratorB0 iterator_B0, ///< iterator over B0 operand in global memory IteratorAccumulatorScaleBias iterator_accum0_scale, ///< iterator over D0 scale vector in global memory IteratorAccumulatorScaleBias iterator_accum0_bias, ///< iterator over D0 bias vector in global memory IteratorB1 iterator_B1, ///< iterator over B1 operand in global memory FragmentC0 const &src_accum, ///< source accumualtor tile OutputOp output_op_0, ///< epilogue operation after 1st Gemm TransformA0 transform_A0 = TransformA0(), ///< transformation applied to A0 fragment TransformB0 transform_B0 = TransformB0(), ///< transformation applied to B0 fragment TransformB1 transform_B1 = TransformB1()) { ///< transformation applied to B1 fragment // // Prologue // // Perform accumulation in the 'd' output operand FragmentC0 accum0 = src_accum; FragmentA0 tb_frag_A; FragmentB0 tb_frag_B0; tb_frag_A.clear(); tb_frag_B0.clear(); // The last kblock is loaded in the prolog iterator_A.load(tb_frag_A); iterator_B0.load(tb_frag_B0); ++iterator_A; ++iterator_B0; this->smem_iterator_A_.store(transform_A0(tb_frag_A)); 
this->smem_iterator_B0_.store(transform_B0(tb_frag_B0)); ++this->smem_iterator_A_; ++this->smem_iterator_B0_; __syncthreads(); // Pair of fragments used to overlap shared memory loads and math instructions WarpFragmentA0 warp_frag_A0[2]; WarpFragmentB0 warp_frag_B0[2]; this->warp_tile_iterator_A0_.set_kgroup_index(0); this->warp_tile_iterator_B0_.set_kgroup_index(0); this->warp_tile_iterator_A0_.load(warp_frag_A0[0]); this->warp_tile_iterator_B0_.load(warp_frag_B0[0]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; Operator0 warp_mma0; int smem_write_stage_idx = 1; // Avoid reading out of bounds iterator_A.clear_mask(gemm_k_iterations_0 <= 1); iterator_B0.clear_mask(gemm_k_iterations_0 <= 1); // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing // shared memory loads (which have the tightest latency requirement). // // Mainloop // // Note: The main loop does not support Base::kWarpGemmIterations == 2. CUTLASS_GEMM_LOOP for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) { // // Loop over GEMM K dimension // CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group // as the case may be. if (warp_mma_k == Base::kWarpGemmIterations0 - 1) { // Write fragments to shared memory this->smem_iterator_A_.store(transform_A0(tb_frag_A)); this->smem_iterator_B0_.store(transform_B0(tb_frag_B0)); __syncthreads(); ++this->smem_iterator_A_; ++this->smem_iterator_B0_; // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory if (smem_write_stage_idx == 1) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0}); } else { this->warp_tile_iterator_A0_.add_tile_offset( {0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0}); this->warp_tile_iterator_B0_.add_tile_offset( {-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0}); } smem_write_stage_idx ^= 1; } this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; if (warp_mma_k == 0) { iterator_A.load(tb_frag_A); iterator_B0.load(tb_frag_B0); ++iterator_A; ++iterator_B0; // Avoid reading out of bounds if this was the last loop iteration iterator_A.clear_mask(gemm_k_iterations_0 <= 2); iterator_B0.clear_mask(gemm_k_iterations_0 <= 2); } warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2], warp_frag_B0[warp_mma_k % 2], accum0); } } /// Epilogue for the first Implicit Gemm Epilogue0 epilogue0; epilogue0(output_op_0, smem_iterator_D0_, accum0, iterator_accum0_scale, iterator_accum0_bias); __syncthreads(); //2nd Gemm // // Prologue // FragmentB1 tb_frag_B1; tb_frag_B1.clear(); // The last kblock is loaded in the prolog iterator_B1.load(tb_frag_B1); ++iterator_B1; this->smem_iterator_B1_.store(transform_B1(tb_frag_B1)); ++this->smem_iterator_B1_; __syncthreads(); // Pair of fragments used to overlap shared memory loads and math instructions WarpFragmentA1 warp_frag_A1[2]; WarpFragmentB1 warp_frag_B1[2]; this->warp_tile_iterator_B1_.set_kgroup_index(0); warp_tile_iterator_A1_.load(warp_frag_A1[0]); 
this->warp_tile_iterator_B1_.load(warp_frag_B1[0]); ++warp_tile_iterator_A1_; ++this->warp_tile_iterator_B1_; Operator1 warp_mma1; smem_write_stage_idx = 1; int gemm_k_iterations_1 = Shape0::kN / Shape1::kK; // Avoid reading out of bounds iterator_B1.clear_mask(gemm_k_iterations_1 <= 1); // // Mainloop // // Note: The main loop does not support Base::kWarpGemmIterations == 2. CUTLASS_PRAGMA_UNROLL for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) { // // Loop over GEMM K dimension // CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group // as the case may be. if (warp_mma_k == Base::kWarpGemmIterations1 - 1) { // Write fragments to shared memory this->smem_iterator_B1_.store(transform_B1(tb_frag_B1)); __syncthreads(); ++this->smem_iterator_B1_; // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory if (smem_write_stage_idx == 1) { this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0}); } else { this->warp_tile_iterator_B1_.add_tile_offset( {-Base::kStages * Policy1::kPartitionsK * Base::kWarpGemmIterations1, 0}); } smem_write_stage_idx ^= 1; } this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1); // skip warp tile loading for the last kgroup if(gemm_k_iterations_1 > 1 || warp_mma_k < Base::kWarpGemmIterations1 - 1) warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]); ++warp_tile_iterator_A1_; ++this->warp_tile_iterator_B1_; if (warp_mma_k == 0) { iterator_B1.load(tb_frag_B1); ++iterator_B1; // Avoid reading out of bounds if this was the last loop iteration iterator_B1.clear_mask(gemm_k_iterations_1 <= 2); } warp_mma1(accum, warp_frag_A1[warp_mma_k % 2], warp_frag_B1[warp_mma_k % 2], accum); } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
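/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Added note (illustrative, not part of the original header): the fused mainloops above chain
// two GEMMs. The GEMM0 accumulators are passed through `Epilogue0` (applying the scale and bias
// vectors) and staged in shared memory via `smem_iterator_D0_`, where `warp_tile_iterator_A1_`
// re-reads them as the A operand of GEMM1. The K extent of GEMM1 therefore equals the N extent
// of GEMM0, which is why the second mainloop runs `gemm_k_iterations_1 = Shape0::kN / Shape1::kK`
// iterations. A minimal host-side sketch of that relationship, with hypothetical tile shapes:
//
//   struct Shape0 { static int const kN = 128; };  // threadblock-level N of GEMM0 (assumed value)
//   struct Shape1 { static int const kK = 32;  };  // threadblock-level K of GEMM1 (assumed value)
//   static_assert(Shape0::kN % Shape1::kK == 0,
//                 "GEMM0 N must tile evenly into GEMM1 K for the fused second mainloop");
//   int const gemm_k_iterations_1 = Shape0::kN / Shape1::kK;  // 4 mainloop iterations in this sketch
//
/////////////////////////////////////////////////////////////////////////////////////////////////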
cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h", "repo_id": "cutlass", "token_count": 8415 }
6
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example requires NVIDIA Ampere GPU or later. */ // Standard Library includes #include <iostream> #include <sstream> #include <vector> // CUTLASS Includes #include "cutlass/cutlass.h" #include "cutlass/functional.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" // CUTLASS Utility Includes #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/gemm_complex.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // Define the overal warp-level problem shape int const kM = 27; int const kN = 31; int const kK = 17; /////////////////////////////////////////////////////////////////////////////////////////////////// // Define a warp-level GEMM operator. // // This template could be part of the CUTLASS Template Library or implemented internally. This // wraps the matrix multiply operation and epilogue with a GEMM-like interface that can be // instantiated in device code. 
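// Added note (illustration only, not in the original example): the extents above, kM = 27,
// kN = 31, kK = 17, are deliberately not multiples of the 8x8x4 Tensor Core instruction shape.
// The wrapper below therefore rounds the warp tile up to instruction multiples and iterates
// over K in instruction-sized groups, e.g. with InstructionShape = GemmShape<8, 8, 4>:
//
//   WarpShape::kM = ((27 + 8 - 1) / 8) * 8 == 32
//   WarpShape::kN = ((31 + 8 - 1) / 8) * 8 == 32
//   kKgroups = (17 + 4 - 1) / 4 == 5 // number of warp-level MMA iterations over K
//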
namespace cutlass { namespace gemm { namespace warp { template < typename Shape, typename InstructionShape, typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementScalar > class GemmTensorOp { public: using WarpShape = GemmShape< ((Shape::kM + InstructionShape::kM - 1) / InstructionShape::kM) * InstructionShape::kM, ((Shape::kN + InstructionShape::kN - 1) / InstructionShape::kN) * InstructionShape::kN, InstructionShape::kK >; using MmaWarp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, double, // Data type of A elements cutlass::layout::RowMajor, // Layout of A matrix double, // Data type of B elements cutlass::layout::ColumnMajor, // Layout of B matrix double, // Data type of C elements cutlass::layout::RowMajor // Layout of C matrix >::Type; // Number of 'K groups' int const kKgroups = (Shape::kK + InstructionShape::kK - 1) / InstructionShape::kK; // Define a 'FragmentIterator' to iterate over slices of accumulators using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp< typename MmaWarp::Shape, InstructionShape, double, typename MmaWarp::Policy::Operator::FragmentC, cutlass::layout::RowMajor >; // Define an epilogue 'Tile Iteterator' to iterate over slices of elements in Shared Memory using AccumulatorTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpCanonical< typename MmaWarp::Shape, InstructionShape, double, cutlass::layout::RowMajor >; using TensorRefA = typename MmaWarp::IteratorA::TensorRef; using TensorRefB = typename MmaWarp::IteratorB::TensorRef; using TensorRefC = typename AccumulatorTileIterator::TensorRef; public: CUTLASS_HOST_DEVICE GemmTensorOp() { } CUTLASS_DEVICE void operator()( ElementScalar alpha, TensorRefA ref_A, TensorRefB ref_B, ElementScalar beta, TensorRefC ref_C, TensorRefC ref_D, int lane_id) const { // Instantiate iterators pointing to slices of the A and B matrices in shared memory typename MmaWarp::IteratorA iter_A(ref_A, {Shape::kM, Shape::kK}, lane_id); typename MmaWarp::IteratorB iter_B(ref_B, {Shape::kK, Shape::kN}, lane_id); // Instantiate and clear accumulator tile holding the C matrix typename MmaWarp::FragmentC accum; accum.clear(); // Instantiate the warp-level matrix multiply operator MmaWarp mma_op; // Instantiate fragments holding the slice of the matrix held by each warp typename MmaWarp::FragmentA frag_A[2]; typename MmaWarp::FragmentB frag_B[2]; // Load fragments from shared memory iter_A.load(frag_A[0]); iter_B.load(frag_B[0]); ++iter_A; ++iter_B; // Load fragments from shared memory CUTLASS_PRAGMA_UNROLL for (int k = 0; k < kKgroups; ++k) { // Load fragments from shared memory iter_A.load(frag_A[(k + 1) % 2]); iter_B.load(frag_B[(k + 1) % 2]); ++iter_A; ++iter_B; // Compute the matrix multiply mma_op(accum, frag_A[k % 2], frag_B[k % 2], accum); } // Instantiate iterators FragmentIterator accum_frag_it(accum); AccumulatorTileIterator source_tile_it(ref_C, {Shape::kM, Shape::kN}, lane_id); AccumulatorTileIterator dest_tile_it(ref_D, {Shape::kM, Shape::kN}, lane_id); // Define function objects for linear scaling operation cutlass::multiplies<typename FragmentIterator::Fragment> mul_source; cutlass::multiply_add<typename FragmentIterator::Fragment> mul_add_accumulator; // Iterate over the epilogue components CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < FragmentIterator::kIterations; ++idx) { // Define storage for slices of the accumulators typename FragmentIterator::Fragment accum_fragment; typename 
FragmentIterator::Fragment source_fragment; // Select a slice of accumulators from the accumulator tile accum_frag_it.load(accum_fragment); ++accum_frag_it; // Load a corresponding slice from Shared memory source_tile_it.load(source_fragment); ++source_tile_it; // Compute linear scaling - alpha * AB + beta * C source_fragment = mul_source(beta, source_fragment); accum_fragment = mul_add_accumulator(alpha, accum_fragment, source_fragment); // Store the result to shared memory dest_tile_it.store(accum_fragment); ++dest_tile_it; } } }; } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // Sample kernel demonstrating a collective GEMM operation by a warp on arbitrary matrices held // in Shared Memory. __global__ void kernel( double *D_gmem, double alpha, double const *A_gmem, double const *B_gmem, double beta, double const *C_gmem) { // Define several matrices in shared memory __shared__ double A[kM][kK]; __shared__ double B[kN][kK]; __shared__ double C[kM][kN]; // Copy data into SMEM if (threadIdx.x == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { for (int k = 0; k < kK; ++k) { A[m][k] = A_gmem[m * kK + k]; } } CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { for (int k = 0; k < kK; ++k) { B[n][k] = B_gmem[n * kK + k]; } } CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { C[m][n] = C_gmem[m * kN + n]; } } } __syncthreads(); // // Instantiate a warp-level matrix multiply operator given the fundamental instruction shape (8x8x4), // overall shape, data type of each operand, and layout of each operand. // using GemmTensorOp = cutlass::gemm::warp::GemmTensorOp< cutlass::gemm::GemmShape<kM, kN, kK>, cutlass::gemm::GemmShape<8, 8, 4>, double, // Data type of A elements cutlass::layout::RowMajor, // Layout of A matrix double, // Data type of B elements cutlass::layout::ColumnMajor, // Layout of B matrix double, // Data type of C elements cutlass::layout::RowMajor, // Layout of C matrix double // Scalar type of alpha and beta >; // Instantiate the GEMM operator GemmTensorOp gemm; // Execute the warp-level GEMM operation gemm( alpha, {&A[0][0], kK}, {&B[0][0], kK}, beta, {&C[0][0], kN}, {&C[0][0], kN}, threadIdx.x); __syncthreads(); // Copy data into SMEM if (threadIdx.x == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { D_gmem[m * kN + n] = C[m][n]; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to canonical warp-level GEMM operation int main(int argc, const char *arg[]) { bool notSupported = false; // CUTLASS must be compiled with CUDA 11 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "This example requires compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Return 0 so tests are considered passing if run on unsupported platforms. 
return 0; } cutlass::HostTensor<double, cutlass::layout::RowMajor> A({kM, kK}); cutlass::HostTensor<double, cutlass::layout::ColumnMajor> B({kK, kN}); cutlass::HostTensor<double, cutlass::layout::RowMajor> C({kM, kN}); cutlass::HostTensor<double, cutlass::layout::RowMajor> D({kM, kN}); uint64_t seed = 2020; double max = 8; double min = -8; cutlass::reference::host::TensorFillRandomUniform( A.host_view(), seed, max, min, 0 ); cutlass::reference::host::TensorFillRandomUniform( B.host_view(), seed + 17, max, min, 0 ); cutlass::reference::host::TensorFillRandomUniform( C.host_view(), seed + 31, max, min, 0 ); A.sync_device(); B.sync_device(); C.sync_device(); D.sync_device(); dim3 grid(1,1); dim3 block(32, 1, 1); double alpha = 2.25; double beta = 1.24; kernel<<< grid, block >>>( D.device_data(), alpha, A.device_data(), B.device_data(), beta, C.device_data() ); cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Failed to synchronize device after kernel launch." << std::endl; return -1; } D.sync_host(); // Compute reference on host cutlass::HostTensor<double, cutlass::layout::RowMajor> D_ref({kM, kN}, false); cutlass::reference::host::GemmComplex( {kM, kN, kK}, alpha, A.host_ref(), cutlass::ComplexTransform::kNone, B.host_ref(), cutlass::ComplexTransform::kNone, beta, C.host_ref(), D_ref.host_ref(), double() ); // Verify reference matches computed if (!cutlass::reference::host::TensorEquals( D.host_view(), D_ref.host_view())) { std::cerr << "A =\n" << A.host_view() << "\n\nB = \n" << B.host_view() << "\n\nC = " << C.host_view() << "\n\nRef =\n" << D_ref.host_view() << "\n\nD =\n" << D.host_view() << "\n\n"; std::cerr << "Error - device results mismatch host reference." << std::endl; return -1; } std::cout << "Passed" << std::endl; return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////////
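/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Added note (not part of the original source): the kernel above is launched with a single
// threadblock of 32 threads (dim3 block(32, 1, 1)), i.e. exactly one warp, because GemmTensorOp
// is a warp-scoped collective. Every lane passes its own threadIdx.x as the lane id, and all 32
// lanes must reach the gemm(...) call and the surrounding __syncthreads() for the shared-memory
// staging and the Tensor Core MMAs to be valid.
//
/////////////////////////////////////////////////////////////////////////////////////////////////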
cutlass/examples/19_tensorop_canonical/tensorop_canonical.cu/0
{ "file_path": "cutlass/examples/19_tensorop_canonical/tensorop_canonical.cu", "repo_id": "cutlass", "token_count": 5070 }
7
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h) data types in tensor cores. One big advantage is that we can load in fp32 data and convert them implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional fp32 data by using NVIDIA Ampere architecture. We can use the tf32 mode of tensor core to emulate a fast accurate SGEMM kernel which is accelerated using Ampere Tensor Cores (see include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h). The trick is very simple a x b = (a_big + a_small) x (b_big + b_small) = a_big x b_big + a_big x b_small + a_small x b_big big = convert_to_tf32(fp32) small = convert_to_tf32(fp32 - big) a_small x b_small is discarded because they are too small. This example demonstrates usage of this kernel, along with accuracy measurements w.r.t. actual FP32 results (SGEMM using SIMT) and against FP64 results (DGEMM) To enable this feature, the only change needs to make is to change the default OpMultiplyAdd to OpMultiplyAddFastF32. Now, we have several different flavors of sgemm now in the profiler for Ampere. Here are the difference sgemm // CUDA core SIMT kernel. FP32 in, accumulated in FP32, FP32 out. s1688gemm // Use 3xTF32 to emulate FP32. FP32 in, converted in TF32-big and TF32-small internally, // accumulated in FP32, FP32 out. s1688tf32gemm // Use 1xTF32. FP32 in, converted to one TF32 internally, accumulated in FP32, FP32 out. s1688gemm_tf32 // TF32 in, accumulated in FP32, FP32 out. 
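To make the decomposition concrete, a single FP32 operand is split roughly as follows
(illustrative sketch added for clarity; the kernel performs this split internally, see
include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h):

    float a = ...; // original FP32 value
    float a_big = float(cutlass::tfloat32_t(a)); // high-order part, exactly representable in TF32
    float a_small = float(cutlass::tfloat32_t(a - a_big)); // residual, also rounded to TF32

The three retained products a_big x b_big + a_big x b_small + a_small x b_big are each computed
with TF32 Tensor Core MMAs and accumulated in FP32, recovering most of the FP32 mantissa, while
the a_small x b_small term is negligible and is discarded.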
*/ #include <iostream> #include <vector> #include <limits> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_reduce.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; int m, n, k; double l2_norm_3xtf32_vs_fp64; double l2_norm_1xtf32_vs_fp64; double l2_norm_fp32_vs_fp64; // ctor Result( int m, int n, int k, double runtime_ms, double gflops, double l2_norm_3xtf32_vs_fp64, double l2_norm_1xtf32_vs_fp64, double l2_norm_fp32_vs_fp64) : m(m), n(n), k(k), runtime_ms(runtime_ms), gflops(gflops), l2_norm_3xtf32_vs_fp64(l2_norm_3xtf32_vs_fp64), l2_norm_1xtf32_vs_fp64(l2_norm_1xtf32_vs_fp64), l2_norm_fp32_vs_fp64(l2_norm_fp32_vs_fp64) {} Result() {} // // Methods // static void print_csv_header() { std::cout << "M,N,K,Runtime(ms),GFLOPS,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64" << std::endl; } void print_csv_row() { std::cout << m << "," << n << "," << k << "," << runtime_ms << "," << gflops << "," << l2_norm_3xtf32_vs_fp64 << "," << l2_norm_1xtf32_vs_fp64 << "," << l2_norm_fp32_vs_fp64 << std::endl; } }; std::vector<Result> results; /////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; std::string rand_mode; int iterations; int seed; bool benchmark; Options(): help(false), problem_size({3456, 4096, 4096}), iterations(20), seed(1), alpha(1), beta(), rand_mode("uniform"), benchmark(false) { } bool valid() { // // CUTLASS attempts to load 128b vectors of F32 elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 4 elements. // int const kAlignment = 4; if ((problem_size.m() % kAlignment) || (problem_size.n() % kAlignment) || (problem_size.k() % kAlignment)) { // misaligned tensors return false; } return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("seed", seed); cmd.get_cmd_line_argument("rand_mode", rand_mode); if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "27_ampere_3xtf32_fast_accurate_tensorop_gemm example\n\n" << " This example uses the CUTLASS Library to emulate FP32 with TF32 tensorop GEMM computations.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --rand_mode=<string> gauss / uniform*\n\n" << " --seed=<int> Random number seed (1*)\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n\n"; out << "\n\nExamples:\n\n" << "$ ./examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm --m=1024 --n=512 \\\n" << " --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = problem_size.product(); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 64, 16>; // <- threadblock tile M = 128, N = 128, K = 16 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 32, 16>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< float, // <- data type of output matrix 128 / cutlass::sizeof_bits<float>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too float, // <- data type of accumulator float>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; // Alignment constexpr int Alignment = 4; // // Gemm Operators (Gemm_3xTF32, Gemm_1xTF32, GEMM_F32, GEMM_F64) // // Gemm_3xTF32 using Gemm_3xTF32 = cutlass::gemm::device::Gemm< float, LayoutInputA, float, LayoutInputB, float, LayoutOutput, float, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, Alignment, Alignment, false, cutlass::arch::OpMultiplyAddFastF32>; // Gemm_1xTF32 using Gemm_1xTF32 = cutlass::gemm::device::Gemm< float, LayoutInputA, float, LayoutInputB, float, LayoutOutput, float, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, Alignment, Alignment, false, cutlass::arch::OpMultiplyAdd>; // Gemm_F64 using Gemm_F64 = cutlass::reference::device::Gemm< double, LayoutInputA, double, LayoutInputB, double, LayoutOutput, double, double>; // Gemm_F32 using Gemm_F32 = cutlass::reference::device::Gemm< float, LayoutInputA, float, LayoutInputB, float, LayoutOutput, float, float>; bool run(Options &options) { // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size = options.problem_size; //////////////////////////////////////////////////////////////////////////////// /// 1. Initialize F32 Precision input tensors using CUTLASS helper functions //////////////////////////////////////////////////////////////////////////////// cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N if (options.rand_mode == "uniform") { const float min = -1; const float max = 1; // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix C on host with uniform-distribution random data } else if (options.rand_mode == "gauss") { // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomGaussian( tensor_a_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix A on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_b_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix B on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_c_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix C on host with gaussian-distribution random data } cutlass::reference::host::TensorFill( tensor_d_F32.host_view()); // 
<- fill matrix D on host with zeros // Copy data from host to GPU tensor_a_F32.sync_device(); tensor_b_F32.sync_device(); tensor_c_F32.sync_device(); tensor_d_F32.sync_device(); //////////////////////////////////////////////////////////////////////////////// /// 2. Initialize F64 tensors using the same values used for F32 //////////////////////////////////////////////////////////////////////////////// // Gemm input operands (A, B, C) cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N // Gemm output (D) for GEMM_F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_3xTF32 cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_1xTF32 cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Copy values from the DP tensors cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view()); // Copy data from host to GPU tensor_a_F64.sync_device(); tensor_b_F64.sync_device(); tensor_c_F64.sync_device(); tensor_d_F64.sync_device(); tensor_d_3xTF32.sync_device(); tensor_d_1xTF32.sync_device(); // Initialize alpha and beta for dot product computation float alpha = float(options.alpha); float beta = float(options.beta); // Split K dimension into 1 partitions int split_k_slices = 1; //////////////////////////////////////////////////////////////////////////////// /// 3. Run 3xTF32 kernel within a profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_3xTF32::Arguments arguments_3xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_3xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_3xtf32 = Gemm_3xTF32::get_workspace_size(arguments_3xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_3xTF32 gemm_op_3xTF32; // Check the problem size is supported or not cutlass::Status status_3xtf32 = gemm_op_3xTF32.can_implement(arguments_3xtf32); CUTLASS_CHECK(status_3xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_3xtf32 = gemm_op_3xTF32.initialize(arguments_3xtf32, workspace_3xtf32.get()); CUTLASS_CHECK(status_3xtf32); // Result structure Result result; // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } } // Record an event at the start of a series of GEMMs result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { // Launch initialized CUTLASS kernel status_3xtf32 = gemm_op_3xTF32(); CUTLASS_CHECK(status_3xtf32); } // // Stop profiling loop // // Record an event when the GEMMs are complete result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Compute average runtime and GFLOPs. result.m = problem_size.m(); result.n = problem_size.n(); result.k = problem_size.k(); result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } tensor_d_3xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /// 4. Run TF32 kernel without profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_1xTF32::Arguments arguments_1xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_1xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_1xtf32 = Gemm_1xTF32::get_workspace_size(arguments_1xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_1xTF32 gemm_op_1xtf32; // Check the problem size is supported or not cutlass::Status status_1xtf32 = gemm_op_1xtf32.can_implement(arguments_1xtf32); CUTLASS_CHECK(status_1xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_1xtf32 = gemm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get()); CUTLASS_CHECK(status_1xtf32); // Launch initialized CUTLASS kernel status_1xtf32 = gemm_op_1xtf32(); CUTLASS_CHECK(status_1xtf32); tensor_d_1xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F64) //////////////////////////////////////////////////////////////////////////////// // Create instantiation for device reference gemm kernel Gemm_F64 gemm_f64; // Launch device reference gemm kernel gemm_f64(problem_size, alpha, tensor_a_F64.device_ref(), tensor_b_F64.device_ref(), beta, tensor_c_F64.device_ref(), tensor_d_F64.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F64.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F32) //////////////////////////////////////////////////////////////////////////////// // Create instantiation for device reference gemm kernel Gemm_F32 gemm_f32; // Launch device reference gemm kernel gemm_f32(problem_size, alpha, tensor_a_F32.device_ref(), tensor_b_F32.device_ref(), beta, tensor_c_F32.device_ref(), tensor_d_F32.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /////// Compute l2 norms //////////////////////////////////////////////////////////////////////////////// // l2 norm 3xTF32 vs F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view()); result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm 1xTF32 vs F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view()); result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm F32 vs F64 
cutlass::HostTensor<double, LayoutOutput> tensor_d_F32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view()); result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view()); results.push_back(result); /////////////////////////////////////////////////////////////////////////////// // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << std::fixed; std::cout.precision(4); std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout.precision(2); std::cout << "GFLOPs: " << result.gflops << std::endl; std::cout << "Normalized L2 norm of" << std::endl; std::cout.precision(8); std::cout << std::scientific << " - 3xTF32 error with FP64 reference : " << result.l2_norm_3xtf32_vs_fp64 << std::endl << " - 1xTF32 error with FP64 reference : " << result.l2_norm_1xtf32_vs_fp64 << std::endl << " - FP32 error with FP64 reference : " << result.l2_norm_fp32_vs_fp64 << std::endl; return true; } int main(int argc, const char **argv) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } Options options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } bool result = true; if (options.benchmark) { for (int k = 4; k <= 65536; k *= 2) { options.problem_size[2] = k; printf("Gemm problem size: %d x %d x %d\n", \ options.problem_size.m(), options.problem_size.n(), options.problem_size.k()); if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result &= run(options); } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result = run(options); } if (!result) return -1; std::cout << std::endl << "CSV results" << std::endl; Result::print_csv_header(); for(auto &r : results) r.print_csv_row(); return 0; }
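/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Added note (not part of the original example): the three "Normalized L2 norm" values printed
// above are relative error metrics measured against the FP64 reference, so smaller is better.
// Per the description at the top of this file, the 3xTF32 result is expected to land close to
// the FP32 SIMT reference, while the 1xTF32 result shows a noticeably larger error. A typical
// invocation, using the flags documented in Options::print_usage():
//
//   ./27_ampere_3xtf32_fast_accurate_tensorop_gemm --m=1024 --n=512 --alpha=2 --beta=0.707
//
/////////////////////////////////////////////////////////////////////////////////////////////////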
cutlass/examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu/0
{ "file_path": "cutlass/examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu", "repo_id": "cutlass", "token_count": 12993 }
8
# PyCUTLASS Examples

This directory contains deprecated examples for PyCUTLASS, a precursor to the CUTLASS Python interface. For examples of using CUTLASS's actively-maintained Pythonic interface, see the [examples/python](/examples/python) directory.
cutlass/examples/40_cutlass_py/README.md/0
{ "file_path": "cutlass/examples/40_cutlass_py/README.md", "repo_id": "cutlass", "token_count": 68 }
9
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Grouped FMHA kernel */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" #include "cutlass/layout/matrix.h" #include "cutlass/trace.h" #include "cutlass/gemm/kernel/gemm_transpose_operands.h" #include "fmha_grouped_problem_visitor.h" #include "gemm_kernel_utils.h" #include "gemm/mma_accum_lambda_iterator.h" #include "epilogue/epilogue_rescale_output.h" namespace { static CUTLASS_DEVICE float atomicMaxFloat(float* addr, float value) { // source: https://stackoverflow.com/a/51549250 return (value >= 0) ? __int_as_float(atomicMax((int*)addr, __float_as_int(value))) : __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value))); } } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename MM0_, ///! Structure for computing P = Q @ K typename MM1_, ///! Structure for computing O = P @ V typename scalar_t_, typename accum_t_, typename output_t_, typename output_accum_t_, bool kKeepOutputInRF, ///! Whether the intermediate output from MM0_ should be kept in the register file GroupScheduleMode GroupScheduleMode_ ///! 
Type of scheduling to perform > struct FMHAGrouped { public: using MM0 = MM0_; using MM1 = MM1_; using scalar_t = scalar_t_; using accum_t = accum_t_; using output_t = output_t_; using output_accum_t = output_accum_t_; static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_; static constexpr bool kNeedsOutputAccumulatorBuffer = !kKeepOutputInRF && !cutlass::platform::is_same<output_accum_t, output_t>::value; // Parameters to satisfy BaseGrouped using ElementA = scalar_t; using ElementB = scalar_t; using ElementC = accum_t; using LayoutA = typename MM0::LayoutA; using LayoutB = typename MM0::ElementB; using LayoutC = typename MM1::ElementC; static ComplexTransform const kTransformA = ComplexTransform::kNone; static ComplexTransform const kTransformB = ComplexTransform::kNone; static int const kAlignmentA = MM0::kAlignmentA; static int const kAlignmentB = MM0::kAlignmentB; static int const kAlignmentC = 1; using Mma = typename MM1::Mma; using EpilogueOutputOp = typename MM1::EpilogueOutputOp; using ThreadblockSwizzle = void; using Operator = typename MM1::Operator; using WarpShape = typename MM1::WarpShape; using InstructionShape = typename MM1::InstructionShape; using ElementQ = scalar_t; using ElementK = scalar_t; using ElementP = accum_t; using ElementV = scalar_t; using ElementO = output_t; using ElementOAccum = output_accum_t; using ElementAccumulator = accum_t; using LayoutQ = typename MM0::LayoutA; using LayoutK = typename MM0::LayoutB; using LayoutP = typename MM0::LayoutC; using LayoutV = typename MM1::LayoutB; using LayoutO = typename MM1::LayoutC; static bool const kPreloadV = (MM1::Mma::ArchTag::kMinComputeCapability >= 80 && cutlass::sizeof_bits<ElementV>::value == 16); static int const kAlignmentQ = MM0::kAlignmentA; static int const kAlignmentK = MM0::kAlignmentB; static int const kAlignmentV = 1; using ThreadblockShape = typename MM0::ThreadblockShape; static int const kQueriesPerBlock = ThreadblockShape::kM; static int const kKeysPerBlock = ThreadblockShape::kN; static constexpr bool kSupportsDropout = false; static constexpr bool kSupportsBias = false; /// Warp count (concept: GemmShape) using WarpCount = typename MM1::WarpCount; static int const kThreadsPerWarp = 32; static int const kThreadCount = kThreadsPerWarp * WarpCount::kCount; static constexpr int kNumWarpsPerBlock = kQueriesPerBlock * kKeysPerBlock / (kThreadsPerWarp * kThreadsPerWarp); using ProblemVisitor = FMHAGroupedProblemVisitor< ThreadblockShape, kGroupScheduleMode, kThreadCount, kThreadCount>; // // Structures // /// Argument structure struct Arguments { // // Data members // GemmCoord *problem_sizes0{nullptr}; GemmCoord *problem_sizes1{nullptr}; int problem_count{0}; int threadblock_count{0}; ElementQ ** ptr_Q{nullptr}; ElementK ** ptr_K{nullptr}; ElementP ** ptr_P{nullptr}; ElementV ** ptr_V{nullptr}; ElementO ** ptr_O{nullptr}; ElementOAccum ** ptr_O_accum{nullptr}; typename LayoutQ::Stride::LongIndex *ldq{nullptr}; typename LayoutK::Stride::LongIndex *ldk{nullptr}; typename LayoutP::Stride::LongIndex *ldv{nullptr}; typename LayoutO::Stride::LongIndex *ldo{nullptr}; // Whether causal masking is to be performed bool causal{false}; // Scale ElementAccumulator scale{0}; // Only used by device-level operator GemmCoord *host_problem_sizes{nullptr}; // // Methods // /// Default ctor Arguments() = default; /// Ctor CUTLASS_HOST_DEVICE Arguments( GemmCoord *problem_sizes0, GemmCoord *problem_sizes1, int problem_count, int threadblock_count, ElementQ ** ptr_Q, ElementK ** ptr_K, ElementP ** ptr_P, 
ElementV ** ptr_V, ElementO ** ptr_O, ElementOAccum ** ptr_O_accum, typename LayoutQ::Stride::LongIndex *ldq, typename LayoutK::Stride::LongIndex *ldk, typename LayoutP::Stride::LongIndex *ldp, typename LayoutV::Stride::LongIndex *ldv, typename LayoutO::Stride::LongIndex *ldo, bool causal, ElementAccumulator scale, GemmCoord *host_problem_sizes=nullptr ): problem_sizes0(problem_sizes0), problem_sizes1(problem_sizes1), problem_count(problem_count), threadblock_count(threadblock_count), ptr_Q(ptr_Q), ptr_K(ptr_K), ptr_P(ptr_P), ptr_V(ptr_V), ptr_O(ptr_O), ptr_O_accum(kNeedsOutputAccumulatorBuffer ? ptr_O_accum : (accum_t**)ptr_O), ldq(ldq), ldk(ldk), ldv(ldv), ldo(ldo), causal(causal), scale(scale), host_problem_sizes(host_problem_sizes) { } bool __host__ check_supported() { CHECK_ALIGNED_PTR(ptr_Q, kAlignmentQ); CHECK_ALIGNED_PTR(ptr_K, kAlignmentK); CHECK_ALIGNED_PTR(ptr_V, kAlignmentV); XFORMERS_CHECK(ldq % kAlignmentQ == 0, "query is not correctly aligned"); XFORMERS_CHECK(ldk % kAlignmentK == 0, "key is not correctly aligned"); XFORMERS_CHECK(ldv % kAlignmentV == 0, "value is not correctly aligned"); return true; } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params { typename ProblemVisitor::Params problem_visitor; int threadblock_count; ElementQ ** ptr_Q; ElementK ** ptr_K; ElementP ** ptr_P; ElementV ** ptr_V; ElementO ** ptr_O; ElementOAccum ** ptr_O_accum; typename LayoutQ::Stride::LongIndex *ldq; typename LayoutK::Stride::LongIndex *ldk; typename LayoutP::Stride::LongIndex *ldv; typename LayoutO::Stride::LongIndex *ldo; ElementAccumulator scale; bool causal; // // Methods // CUTLASS_HOST_DEVICE Params(): ptr_Q(nullptr), ptr_K(nullptr), ptr_P(nullptr), ptr_V(nullptr), ptr_O(nullptr), ptr_O_accum(nullptr), ldq(nullptr), ldk(nullptr), ldv(nullptr), ldo(nullptr), causal(false), scale(0) { } CUTLASS_HOST_DEVICE Params(Arguments const &args, void *workspace = nullptr, int tile_count = 0): problem_visitor(args.problem_sizes0, args.problem_sizes1, args.problem_count, workspace, tile_count), threadblock_count(args.threadblock_count), ptr_Q(args.ptr_Q), ptr_K(args.ptr_K), ptr_P(args.ptr_P), ptr_V(args.ptr_V), ptr_O(args.ptr_O), ptr_O_accum(kNeedsOutputAccumulatorBuffer ? args.ptr_O_accum : (accum_t**)args.ptr_O), ldq(args.ldq), ldk(args.ldk), ldv(args.ldv), ldo(args.ldo), causal(args.causal), scale(args.scale) { } CUTLASS_HOST_DEVICE void update( Arguments const &args, void *workspace = nullptr, int tile_count = 0) { problem_visitor = typename ProblemVisitor::Params(args.problem_sizes0, args.problem_sizes1, args.problem_count, workspace, tile_count); threadblock_count = args.threadblock_count; ptr_Q = args.ptr_Q; ptr_K = args.ptr_K; ptr_P = args.ptr_P; ptr_V = args.ptr_V; ptr_O = args.ptr_O; ptr_O_accum = kNeedsOutputAccumulatorBuffer ? 
args.ptr_O_accum : (accum_t**)args.ptr_O; ldq = args.ldq; ldk = args.ldk; ldv = args.ldv; ldo = args.ldo; causal = args.causal; scale = args.scale; } }; // Shared storage - depends on kernel params struct ScalingCoefs { cutlass::Array<ElementAccumulator, kQueriesPerBlock> m_prime; cutlass::Array<ElementAccumulator, kQueriesPerBlock> s_prime; cutlass::Array<ElementAccumulator, kQueriesPerBlock> mi; cutlass::Array<ElementAccumulator, kQueriesPerBlock> out_rescale; cutlass::Array<ElementAccumulator, kQueriesPerBlock * MM0::MmaCore::WarpCount::kN> addition_storage; }; struct SharedStorageEpilogueAtEnd : ScalingCoefs { struct SharedStorageAfterMM0 { // Everything here might be overwritten during MM0 typename MM0::AccumulatorSharedStorage si; typename MM1::Mma::SharedStorage mm1; }; union { typename MM0::Mma::SharedStorage mm0; SharedStorageAfterMM0 after_mm0; typename MM1::DefaultEpilogue::SharedStorage epilogue; }; CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage& epilogue_shared_storage() { return epilogue; } // ProblemVisitor shared storage can't be overlapped with others typename ProblemVisitor::SharedStorage problem_visitor; }; struct SharedStorageEpilogueInLoop : ScalingCoefs { struct SharedStorageAfterMM0 { // Everything here might be overwritten during MM0 typename MM0::AccumulatorSharedStorage si; typename MM1::Mma::SharedStorage mm1; typename MM1::DefaultEpilogue::SharedStorage epilogue; }; union { typename MM0::Mma::SharedStorage mm0; SharedStorageAfterMM0 after_mm0; }; CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage& epilogue_shared_storage() { return after_mm0.epilogue; } // ProblemVisitor shared storage can't be overlapped with others typename ProblemVisitor::SharedStorage problem_visitor; }; using SharedStorage = typename cutlass::platform::conditional< kKeepOutputInRF, SharedStorageEpilogueAtEnd, SharedStorageEpilogueInLoop>::type; private: // Parameters to be used by an individual tile struct TileParams { CUTLASS_HOST_DEVICE static int query_start(int threadblock_idx) { return threadblock_idx * kQueriesPerBlock; } // Returns whether this threadblock computes within the number of queries, // which is determined by the M dimension of problem 0 CUTLASS_HOST_DEVICE static bool can_compute(int threadblock_idx, const GemmCoord& problem_size0) { return query_start(threadblock_idx) < problem_size0.m(); } CUTLASS_HOST_DEVICE static int num_queries(int threadblock_idx, const GemmCoord& problem_size0) { return problem_size0.m() - query_start(threadblock_idx); } CUTLASS_HOST_DEVICE static int num_keys(int threadblock_idx, const GemmCoord& problem_size0, bool causal) { int nk = problem_size0.n(); if (causal) { nk = cutlass::fast_min(int32_t(query_start(threadblock_idx) + kQueriesPerBlock), nk); } return nk; } }; public: // // Methods // CUTLASS_DEVICE FMHAGrouped() { } /// Determines whether kernel satisfies alignment static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) { return Status::kSuccess; } static Status can_implement(Arguments const &args) { return Status::kSuccess; } static CUTLASS_DEVICE int16_t thread_id() { return threadIdx.x; } static CUTLASS_DEVICE int8_t warp_id() { return threadIdx.x / kThreadsPerWarp; } static CUTLASS_DEVICE int8_t lane_id() { return threadIdx.x % kThreadsPerWarp; } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { auto& m_prime = shared_storage.m_prime; auto& s_prime = shared_storage.s_prime; [[maybe_unused]] auto& si = shared_storage.after_mm0.si; auto& 
mi = shared_storage.mi; auto& out_rescale = shared_storage.out_rescale; ProblemVisitor problem_visitor( params.problem_visitor, shared_storage.problem_visitor, blockIdx.x); // Outer 'persistent' loop to iterate over tiles while (problem_visitor.next_tile()) { GemmCoord problem_size0 = problem_visitor.problem_size0(); GemmCoord problem_size1 = problem_visitor.problem_size1(); const int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); if (!TileParams::can_compute(threadblock_idx, problem_size0)) { problem_visitor.advance(gridDim.x); continue; } const int32_t problem_idx = problem_visitor.problem_index(); if (thread_id() < kQueriesPerBlock) { s_prime[thread_id()] = ElementAccumulator(0); out_rescale[thread_id()] = accum_t(1.0); m_prime[thread_id()] = -cutlass::platform::numeric_limits<ElementAccumulator>::infinity(); mi[thread_id()] = -cutlass::platform::numeric_limits<ElementAccumulator>::infinity(); } ElementO *ptr_O = params.ptr_O[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldo[problem_idx]; ElementOAccum *ptr_O_accum = params.ptr_O_accum[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldo[problem_idx]; const int num_queries = TileParams::num_queries(threadblock_idx, problem_size0); auto createOutputIter = [&](int col) -> typename MM1::OutputTileIterator { using OutputTileIterator = typename MM1::OutputTileIterator; return OutputTileIterator( typename OutputTileIterator::Params{(int32_t)params.ldo[problem_idx]}, ptr_O, typename OutputTileIterator::TensorCoord{ num_queries, problem_size1.n()}, thread_id(), {0, col}); }; auto createOutputAccumIter = [&](int col) -> typename MM1::OutputTileIteratorAccum { using OutputTileIteratorAccum = typename MM1::OutputTileIteratorAccum; return OutputTileIteratorAccum( typename OutputTileIteratorAccum::Params{(int32_t)params.ldo[problem_idx]}, ptr_O_accum, typename OutputTileIteratorAccum::TensorCoord{ num_queries, problem_size1.n()}, thread_id(), {0, col}); }; typename MM1::Mma::FragmentC accum_o; accum_o.clear(); const int num_keys = TileParams::num_keys(threadblock_idx, problem_size0, params.causal); for (int32_t iter_key_start = 0; iter_key_start < num_keys; iter_key_start += kKeysPerBlock) { int32_t problem_size_0_m = cutlass::fast_min((int32_t)kQueriesPerBlock, num_queries); int32_t problem_size_0_n = cutlass::fast_min( (int32_t)kKeysPerBlock, num_keys - iter_key_start); int32_t const& problem_size_0_k = problem_size0.k(); int32_t const& problem_size_1_n = problem_size1.n(); int32_t const& problem_size_1_k = problem_size_0_n; auto prologueV = [&](int blockN) { typename MM1::Mma::IteratorB iterator_V( typename MM1::IteratorB::Params{MM1::LayoutB(params.ldv[problem_idx])}, params.ptr_V[problem_idx] + iter_key_start * params.ldv[problem_idx], {problem_size_1_k, problem_size_1_n}, thread_id(), cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN}); MM1::Mma::prologue( shared_storage.after_mm0.mm1, iterator_V, thread_id(), problem_size_1_k); }; __syncthreads(); // Need to have shared memory initialized, and `m_prime` // updated from end of prev iter // // MATMUL: Q.K_t // // Computes the block-matrix product of: // (a) query[query_start:query_end, :] // with // (b) key[iter_key_start:iter_key_start + kKeysPerBlock] // and stores that into `shared_storage.si` // ElementQ *ptr_Q = params.ptr_Q[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldq[problem_idx]; // Construct iterators to A and B operands typename MM0::IteratorA iterator_A( typename MM0::IteratorA::Params( typename 
MM0::MmaCore::LayoutA(params.ldq[problem_idx])), ptr_Q, {problem_size_0_m, problem_size_0_k}, thread_id(), {0, 0}); typename MM0::IteratorB iterator_B( typename MM0::IteratorB::Params( typename MM0::MmaCore::LayoutB(params.ldk[problem_idx])), params.ptr_K[problem_idx] + iter_key_start * params.ldk[problem_idx], {problem_size_0_k, problem_size_0_n}, thread_id(), {0, 0}); // Construct thread-scoped matrix multiply typename MM0::Mma mma( shared_storage.mm0, thread_id(), warp_id(), lane_id()); typename MM0::Mma::FragmentC accum; accum.clear(); auto gemm_k_iterations = (problem_size_0_k + MM0::Mma::Shape::kK - 1) / MM0::Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); __syncthreads(); if (kPreloadV) { prologueV(0); } else { MM1::Mma::drain_cp_asyncs(); } typename MM0::Mma::Operator::IteratorC::TensorCoord iteratorC_tile_offset = { (warp_id() % MM0::Mma::WarpCount::kM), (warp_id() / MM0::Mma::WarpCount::kM) }; // Mask out last if causal if (params.causal && num_keys - iter_key_start <= kKeysPerBlock) { auto lane_offset = MM0::AccumLambdaIterator::get_lane_offset( lane_id(), warp_id(), iteratorC_tile_offset); int32_t last_col; MM0::AccumLambdaIterator::iterateRows( lane_offset, [&](int accum_m) { last_col = TileParams::query_start(threadblock_idx) + accum_m - iter_key_start; }, [&](int accum_m, int accum_n, int idx) { if (accum_n > last_col) { accum[idx] = -cutlass::platform::numeric_limits<accum_t>::infinity(); } }, [&](int accum_m) {}); } // DISPATCH_BOOL(iter_key_start == 0, kIsFirst, ([&] { // DISPATCH_BOOL( // num_keys - iter_key_start >= kKeysPerBlock, // kFullColumns, // ([&] { // // Update `mi` from accum stored in registers // // Also does accum[i] <- exp(accum[i] - mi) // iterative_softmax< // typename MM0::Mma::Operator::IteratorC, // kFullColumns, // kIsFirst>( // accum_o, // accum, // mi, // m_prime, // s_prime, // lane_id(), // thread_id(), // warp_id(), // num_keys - iter_key_start, // iteratorC_tile_offset, // kSupportsBias ? 1.0f : params.scale); // })); // })); // Update `mi` from accum stored in registers // Also does accum[i] <- exp(accum[i] - mi) iterative_softmax<typename MM0::Mma::Operator::IteratorC>( accum_o, accum, mi, m_prime, s_prime, out_rescale, shared_storage.addition_storage, lane_id(), thread_id(), warp_id(), num_keys - iter_key_start, iter_key_start == 0, iteratorC_tile_offset, kSupportsBias ? 1.0f : params.scale); // Output results to shared-memory int warp_idx_mn_0 = warp_id() % (MM0::Mma::Base::WarpCount::kM * MM0::Mma::Base::WarpCount::kN); auto output_tile_coords = cutlass::MatrixCoord{ warp_idx_mn_0 % MM0::Mma::Base::WarpCount::kM, warp_idx_mn_0 / MM0::Mma::Base::WarpCount::kM}; MM0::B2bGemm::accumToSmem( shared_storage.after_mm0.si, accum, lane_id(), output_tile_coords); __syncthreads(); // // MATMUL: Attn . V // Run the matmul `attn @ V` for a block of attn and V. // `attn` is read from shared memory (in `shared_storage_si`) // `V` is read from global memory (with iterator_B) // const int64_t nBlockN = kKeepOutputInRF ? 
1 : ceil_div( (int64_t)problem_size_1_n, int64_t(MM1::ThreadblockShape::kN)); // Iterate over the N dimension of GEMM1 for (int blockN = 0; blockN < nBlockN; ++blockN) { int gemm_k_iterations = (problem_size_1_k + MM1::Mma::Shape::kK - 1) / MM1::Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add and store it in accum // (in registers) if (!kPreloadV) { __syncthreads(); // we share shmem between mma and epilogue } typename MM1::Mma::IteratorB iterator_V( typename MM1::IteratorB::Params{MM1::LayoutB(params.ldv[problem_idx])}, params.ptr_V[problem_idx] + iter_key_start * params.ldv[problem_idx], {problem_size_1_k, problem_size_1_n}, thread_id(), cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN}); typename MM1::Mma mma_pv( // operand A: Pij_dropped in shared memory shared_storage.after_mm0.si.accum_ref(), // operand B: shared memory staging area for Vj, which is loaded // from global memory shared_storage.after_mm0.mm1.operand_B_ref(), (int)thread_id(), (int)warp_id(), (int)lane_id()); mma_pv.set_prologue_done(kPreloadV); if (!kKeepOutputInRF) { accum_o.clear(); } mma_pv(gemm_k_iterations, accum_o, iterator_V, accum_o); __syncthreads(); if (kPreloadV && !kKeepOutputInRF && blockN + 1 < nBlockN) { prologueV(blockN + 1); } if (!kKeepOutputInRF) { MM1::Mma::drain_cp_asyncs(); DISPATCH_BOOL( iter_key_start == 0, kIsFirst, ([&] { DISPATCH_BOOL( (iter_key_start + kKeysPerBlock) >= num_keys, kIsLast, ([&] { using DefaultEpilogue = typename MM1::DefaultEpilogue; using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp; using ElementCompute = typename DefaultOp::ElementCompute; using EpilogueOutputOp = typename cutlass::epilogue:: thread::MemoryEfficientAttentionNormalize< typename cutlass::platform::conditional< kIsLast, output_t, output_accum_t>::type, output_accum_t, DefaultOp::kCount, typename DefaultOp::ElementAccumulator, output_accum_t, kIsFirst, kIsLast, cutlass::Array<ElementCompute, kQueriesPerBlock>>; using Epilogue = typename cutlass::epilogue::threadblock:: EpiloguePipelined< typename DefaultEpilogue::Shape, typename MM1::Mma::Operator, DefaultEpilogue::kPartitionsK, typename cutlass::platform::conditional< kIsLast, typename MM1::OutputTileIterator, typename MM1::OutputTileIteratorAccum>::type, typename DefaultEpilogue:: AccumulatorFragmentIterator, typename DefaultEpilogue::WarpTileIterator, typename DefaultEpilogue::SharedLoadIterator, EpilogueOutputOp, typename DefaultEpilogue::Padding, DefaultEpilogue::kFragmentsPerIteration, true, // IterationsUnroll typename MM1::OutputTileIteratorAccum // Read // iterator >; int col = blockN * MM1::Mma::Shape::kN; auto source_iter = createOutputAccumIter(col); auto dest_iter = gemm_kernel_utils::call_conditional< kIsLast, decltype(createOutputIter), decltype(createOutputAccumIter)>:: apply(createOutputIter, createOutputAccumIter, col); EpilogueOutputOp rescale(s_prime, out_rescale); Epilogue epilogue( shared_storage.epilogue_shared_storage(), thread_id(), warp_id(), lane_id()); epilogue(rescale, dest_iter, accum_o, source_iter); })); })); if (!kKeepOutputInRF) { __syncthreads(); } } } __syncthreads(); // we modify `m_prime` after } if (kKeepOutputInRF) { const bool kIsFirst = true; const bool kIsLast = true; using DefaultEpilogue = typename MM1::DefaultEpilogue; using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp; using ElementCompute = typename DefaultOp::ElementCompute; using EpilogueOutputOp = typename cutlass::epilogue::thread::MemoryEfficientAttentionNormalize< output_t, // output output_accum_t, // source 
DefaultOp::kCount, typename DefaultOp::ElementAccumulator, // accum output_accum_t, // compute kIsFirst, kIsLast, cutlass::Array<ElementCompute, kQueriesPerBlock>>; using Epilogue = typename cutlass::epilogue::threadblock::EpiloguePipelined< typename DefaultEpilogue::Shape, typename MM1::Mma::Operator, DefaultEpilogue::kPartitionsK, typename MM1::OutputTileIterator, // destination typename DefaultEpilogue::AccumulatorFragmentIterator, typename DefaultEpilogue::WarpTileIterator, typename DefaultEpilogue::SharedLoadIterator, EpilogueOutputOp, typename DefaultEpilogue::Padding, DefaultEpilogue::kFragmentsPerIteration, true, // IterationsUnroll typename MM1::OutputTileIteratorAccum // source tile >; auto dest_iter = createOutputIter(0); EpilogueOutputOp rescale(s_prime, out_rescale); Epilogue epilogue( shared_storage.epilogue_shared_storage(), thread_id(), warp_id(), lane_id()); MM1::Mma::drain_cp_asyncs(); epilogue(rescale, dest_iter, accum_o); } // Next tile problem_visitor.advance(gridDim.x); __syncthreads(); // Don't start the next iteration until all threads are done using shared memory. } } template <typename WarpIteratorC> CUTLASS_DEVICE static void iterative_softmax( typename WarpIteratorC::Fragment& frag_o, // output so far typename WarpIteratorC::Fragment& frag, cutlass::Array<accum_t, kQueriesPerBlock>& mi, cutlass::Array<accum_t, kQueriesPerBlock>& m_prime, cutlass::Array<accum_t, kQueriesPerBlock>& s_prime, cutlass::Array<accum_t, kQueriesPerBlock>& out_rescale, cutlass::Array<accum_t, kQueriesPerBlock * MM0::MmaCore::WarpCount::kN>& addition_storage, int8_t lane_id, int8_t thread_id, int8_t warp_id, int max_col, bool is_first, typename WarpIteratorC::TensorCoord const& tile_offset, float scaling) { /* Iterates on the accumulator and corresponding position on result matrix (1) Update `mi[r]` to the max value of the row `r` (2) In a second iteration do the following: (a) accum <- exp(accum - mi) (b) m_prime <- exp(m_prime - mi) (c) s_prime <- s_prime * m_prime + sum(accum) All of this is done on registers, before we store all of this on shared memory for the next matmul with Value. */ using Fragment = typename WarpIteratorC::Fragment; using LambdaIterator = typename DefaultMmaAccumLambdaIterator< WarpIteratorC, accum_t, kThreadsPerWarp>::Iterator; // Convert to `accum_t` (rather than double) constexpr float kLog2e = 1.4426950408889634074; // log_2(e) = M_LOG2E static_assert(kQueriesPerBlock % kNumWarpsPerBlock == 0, ""); static constexpr int kLinesPerWarp = kQueriesPerBlock / kNumWarpsPerBlock; frag = cutlass::multiplies<Fragment>()(scaling * kLog2e, frag); auto lane_offset = LambdaIterator::get_lane_offset(lane_id, warp_id, tile_offset); // First update `mi` to the max per-row { accum_t max; LambdaIterator::iterateRows( lane_offset, [&](int accum_m) { max = -cutlass::platform::numeric_limits<accum_t>::infinity(); }, [&](int accum_m, int accum_n, int idx) { if (accum_n < max_col) { max = cutlass::fast_max(max, frag[idx]); } }, [&](int accum_m) { // Having 4x atomicMax seems faster than reduce within warp // first... atomicMaxFloat(&mi[accum_m], max); }); } // Make sure we all share the update values for `mi` __syncthreads(); // Doing this `exp` is quite expensive. 
Let's // split it across the warps bool restore_mi_to_minus_inf = false; if (lane_id < kLinesPerWarp) { int id = warp_id * kLinesPerWarp + lane_id; auto m_prime_id = m_prime[id]; auto mi_id = mi[id]; bool changed = m_prime_id < mi_id; // `false` if both are -inf if (changed) { auto m_prime_exp = exp2f(m_prime_id - mi_id); out_rescale[id] = m_prime_exp; s_prime[id] *= m_prime_exp; } else { // Only when bias is enabled, it's possible that all the first values // of attention are masked to `-inf`. In that case we want to avoid // `nan = exp2f(-inf - (-inf))` so we temporarily set `mi` to 0 if (kSupportsBias && mi_id == -cutlass::platform::numeric_limits<accum_t>::infinity()) { restore_mi_to_minus_inf = true; mi[id] = 0.0f; } out_rescale[id] = 1.0f; } } __syncthreads(); // Update output fragments if (kKeepOutputInRF && !is_first) { accum_t line_rescale; LambdaIterator::iterateRows( lane_offset, [&](int accum_m) { line_rescale = out_rescale[accum_m]; }, [&](int accum_m, int accum_n, int idx) { frag_o[idx] = frag_o[idx] * line_rescale; }, [&](int accum_m) {}); } // Update accum_m, accum_n, ... { accum_t mi_row, total_row; LambdaIterator::iterateRows( lane_offset, [&](int accum_m) { mi_row = mi[accum_m]; }, [&](int accum_m, int accum_n, int idx) { frag[idx] = (accum_n < max_col) ? exp2f(frag[idx] - mi_row) : accum_t(0.0); }, [&](int accum_m) {}); LambdaIterator::iterateRows( lane_offset, [&](int accum_m) { total_row = 0.0; }, [&](int accum_m, int accum_n, int idx) { total_row += frag[idx]; }, [&](int accum_m) { if (LambdaIterator::reduceSameRow( lane_id, total_row, [](accum_t a, accum_t b) { return a + b; })) { // NOTE: we could atomically add `total_row` to `s_prime`, but // it's faster (and deterministic) to avoid atomics here addition_storage [accum_m + kQueriesPerBlock * tile_offset.column()] = total_row; } }); } __syncthreads(); if (lane_id < kLinesPerWarp) { int id = warp_id * kLinesPerWarp + lane_id; accum_t total_row = s_prime[id]; if (restore_mi_to_minus_inf) { // Restore `mi`, see above when we set `restore_mi_to_minus_inf=true` mi[id] = -cutlass::platform::numeric_limits<accum_t>::infinity(); } else { m_prime[id] = mi[id]; } CUTLASS_PRAGMA_UNROLL for (int i = 0; i < MM0::MmaCore::WarpCount::kN; ++i) { total_row += addition_storage[id + kQueriesPerBlock * i]; } s_prime[id] = total_row; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
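// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the kernel): the online-softmax recurrence
// that iterative_softmax() above implements per row, written as plain host
// C++ for clarity. Names (OnlineSoftmaxRow, consume_block) are hypothetical;
// in the kernel the same math is distributed across lanes/warps, carried in
// m_prime / s_prime / out_rescale, and done in the log2 domain after scores
// are pre-scaled by `scale * log2(e)` so exp2f can be used.
#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

struct OnlineSoftmaxRow {
  float m_prime = -std::numeric_limits<float>::infinity(); // running row max
  float s_prime = 0.f;                                      // running sum of exponentials
  std::vector<float> out;                                   // running unnormalized output row

  // Consume one block of (already scaled) scores and the matching V rows.
  void consume_block(const std::vector<float>& scores,
                     const std::vector<std::vector<float>>& v_rows) {
    // (1) Update the running maximum `mi` (the kernel uses atomicMaxFloat).
    float mi = m_prime;
    for (float s : scores) mi = std::max(mi, s);

    // (2) Rescale previous accumulators by exp2(m_prime - mi); this is the
    //     `out_rescale` factor applied to accum_o and to s_prime. (The kernel
    //     additionally guards the all -inf row that can arise with bias masking.)
    float rescale = std::exp2(m_prime - mi);
    s_prime *= rescale;
    for (float& o : out) o *= rescale;

    // (3) accum[i] <- exp2(accum[i] - mi); accumulate its row sum and P @ V.
    for (size_t k = 0; k < scores.size(); ++k) {
      float p = std::exp2(scores[k] - mi);
      s_prime += p;
      if (out.empty()) out.assign(v_rows[k].size(), 0.f);
      for (size_t d = 0; d < out.size(); ++d) out[d] += p * v_rows[k][d];
    }
    m_prime = mi;
  }

  // The final division by s_prime corresponds to the epilogue's
  // MemoryEfficientAttentionNormalize step.
  void finalize() {
    for (float& o : out) o /= s_prime;
  }
};
// ---------------------------------------------------------------------------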
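// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical names, not the CUTLASS API) of the
// "residual" predicate scheme implemented by
// PredicatedTileAccessIteratorResidualLast in the header that follows:
// bounds are evaluated only twice -- once for the partial tile at the tensor
// boundary and once for the full tiles -- and the iterator switches between
// the two precomputed masks via set_residual_tile().
#include <cstdio>
#include <vector>

int main() {
  const int kTile = 8;    // elements visited per tile
  const int kExtent = 21; // valid elements (not a multiple of kTile)
  // Size of the partial tile (5 here); fall back to a full tile if it divides evenly.
  const int kResidual = (kExtent % kTile) ? (kExtent % kTile) : kTile;

  // Precompute both masks once, instead of re-checking bounds for every tile.
  std::vector<bool> residual_mask(kTile), full_mask(kTile, true);
  for (int i = 0; i < kTile; ++i) {
    residual_mask[i] = i < kResidual;
  }

  // Visit the partial tile first, then the complete tiles, as the header's
  // documentation describes; no per-tile predicate recomputation is needed.
  bool use_residual = true;  // set_residual_tile(true)
  for (int tile_start = 0; tile_start < kExtent; ) {
    const std::vector<bool>& mask = use_residual ? residual_mask : full_mask;
    for (int i = 0; i < kTile; ++i) {
      if (mask[i]) {
        std::printf("load element %d\n", tile_start + i);  // guarded access
      }
    }
    tile_start += use_residual ? kResidual : kTile;
    use_residual = false;  // set_residual_tile(false) for the remaining tiles
  }
  return 0;
}
// ---------------------------------------------------------------------------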
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates calculating the address and predicates to the load of tiles from pitch-linear rank=2 tensors. This iterator uses masks to guard out-of-bounds accesses. The first tile this iterator visits maybe partial, then the remaining tiles are complete. So, we only need to compute the predicates twice, once before the first tile and once for the remaining full tiles which can share the same predicates. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. */ #pragma once #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedTileAccessIteratorResidualLast /// template < typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, typename AccessType, bool Gather = false> class PredicatedTileAccessIteratorResidualLast; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIteratorResidualLast for pitch-linear /// data. 
/// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather> class PredicatedTileAccessIteratorResidualLast< Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, AccessType_, Gather> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates< Shape, Element, Layout, AdvanceRank, ThreadMap, AccessType>; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert( !(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); using Mask = typename UnderlyingPredicates::Mask; /// Uses a non-template class struct Params : PredicatedTileAccessIteratorParams { using Base = PredicatedTileAccessIteratorParams; // Default ctor CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : Base( layout.stride(0), MakePredicatedTileAccessIteratorDesc< Shape, Element, Layout, kAdvanceRank, ThreadMap>()()) {} CUTLASS_HOST_DEVICE Params(Base const& base) : Base(base) {} }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char*; private: // // Data members // UnderlyingPredicates the_predicates; Mask residual_tile_mask; /// Parameters object with precomputed internal state Params params_; /// Internal pointer to first access of tile BytePointer pointer_; /// Below is used when Gather is turned on. We need to record strided_offset /// and contiguous_offset separated to compute the offset by using /// /// offset = contiguous_offset + indices[strided_offset] /// /// Gather indices int const* indices_; Index gather_offset_strided; private: /// Computes predicates based on internally tracked per-thread offset. 
CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { the_predicates.compute_predicates_(extent, is_steady_state); } public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( /// Precomputed parameters object Params const& params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const& threadblock_offset, /// Gather indices int const* indices = nullptr) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), the_predicates(extent), indices_(indices) { the_predicates.set_predicates(thread_id, threadblock_offset); the_predicates.get_mask(residual_tile_mask); // Working around a weird compiler bug happening on P100 for the backward. // I've seen together: the_predicates.predicates_[0] = 14 (instead of 15) // residual_tile_mask[0] = 15 (correct) // // Adding prints when the value is calculated (in `compute_predicates_`) // sometimes removes the bug. The consequence is that we skip some // element of a tensor, leading to wrong results // Setting `compute_predicates_`'s second argument (`is_steady_state`) to // true also seems to get rid of the bug - at the cost of twice as many // comparisons. #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) constexpr bool kWorkAroundCompilerBug = false; #else constexpr bool kWorkAroundCompilerBug = true; #endif the_predicates.compute_predicates_(extent, true && !kWorkAroundCompilerBug); // update internal pointers Layout layout(params_.stride_); if (!Gather) { add_pointer_offset(layout(the_predicates.thread_offset_)); } else { gather_offset_strided = the_predicates.thread_offset_.strided(); add_pointer_offset( layout(make_Coord(the_predicates.thread_offset_.contiguous(), 0))); } } /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( /// Precomputed parameters object Params const& params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id) : PredicatedTileAccessIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } CUTLASS_HOST_DEVICE void set_residual_tile(bool is_residual_tile) { if (is_residual_tile) { the_predicates.set_mask(residual_tile_mask); } } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += sizeof_bits<Element>::value * pointer_offset / 8; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE void add_tile_offset(TensorCoord const& tile_offset) { if (!Gather) { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided()); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous()); pointer_ += Shape::kStrided * tile_offset.strided(); } } else { add_pointer_offset(Shape::kContiguous * tile_offset.contiguous()); gather_offset_strided += 
Shape::kStrided * tile_offset.strided(); } } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType* get() const { if (Gather) { assert(indices_); if (!valid()) { return nullptr; } LongIndex contiguous_offset = the_predicates.iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + the_predicates.iteration_vector_; int strided_index = gather_offset_strided + the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided; LongIndex strided_offset = indices_[strided_index] * LongIndex(params_.stride_) * sizeof_bits<Element>::value / 8; return reinterpret_cast<AccessType*>( pointer_ + contiguous_offset + strided_offset); } return reinterpret_cast<AccessType*>( pointer_ + the_predicates.iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + the_predicates.iteration_vector_; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast& operator++() { the_predicates.operator++(); ++the_predicates.iteration_vector_; if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; } the_predicates.iteration_vector_ = 0; ++the_predicates.iteration_contiguous_; if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) the_predicates.iteration_contiguous_ = 0; ++the_predicates.iteration_strided_; if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { if (!Gather) { pointer_ += params_.inc_strided_; } return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. the_predicates.iteration_strided_ = 0; if (!Gather) { // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, // this subtraction as well as the subsequent integer addition are both // elided by the compiler. pointer_ -= params_.inc_advance_; } return *this; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast operator++(int) { PredicatedTileAccessIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { the_predicates.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { the_predicates.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { the_predicates.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() const { return the_predicates.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIteratorResidualLast for column-major /// data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather> class PredicatedTileAccessIteratorResidualLast< Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessType_, Gather> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType, Gather>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))){}; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( ///< Precomputed parameters object Params const& params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord( threadblock_offset.row(), threadblock_offset.column()), indices) {} /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { 
iterator_.set_iteration_index(index); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const& tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast operator++(int) { PredicatedTileAccessIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIteratorResidualLast for row-major /// data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather> class PredicatedTileAccessIteratorResidualLast< Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessType_, Gather> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType, Gather>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))){}; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( ///< Precomputed parameters object Params const& params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const& threadblock_offset, /// Gather indices int const* indices = nullptr) : iterator_( params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord( threadblock_offset.column(), threadblock_offset.row()), indices) {} /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } CUTLASS_HOST_DEVICE void 
set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const& tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast operator++(int) { PredicatedTileAccessIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2 /// data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIteratorResidualLast< Shape_, Element_, layout::AffineRankN<2>, AdvanceRank, ThreadMap_, AccessType_, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRankN<2>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates< Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap, AccessType>; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert( !(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingPredicates::Mask; /// Parameters object is precomputed state and is host-constructible class Params { public: friend PredicatedTileAccessIteratorResidualLast; private: /// stride of pitch-linear layout (units of Element) Coord<Layout::kStrideRank, Layout::LongIndex> stride_; /// amount (in byte) to increment pointer to move to next access along /// contiguous dimension LongIndex inc_contiguous_; /// amount (in byte) to increment pointer from first access of current /// contiguous dimension to first access of next one. LongIndex inc_strided_; /// amount (in byte) to increment pointer from last access of current /// contiguous dimension to first access of next one. 
LongIndex inc_next_strided_; /// amount (in byte) to increment pointer from last access to first access /// of next tile LongIndex inc_next_; /// amount (in byte) to increment pointer from first access of current tile /// to first access of next tile LongIndex inc_advance_; public: // Default ctor CUTLASS_HOST_DEVICE Params() : stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : stride_({layout.stride(0), layout.stride(1)}) { inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) * sizeof_bits<Element>::value / 8; inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) * sizeof_bits<Element>::value / 8; inc_next_strided_ = inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_; if (kAdvanceRank) { // advance along strided dimension inc_advance_ = Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits<Element>::value / 8; } else { // advance along contiguous dimension inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8; } inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_; }; }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char*; // // Data members // /// Parameters object with precomputed internal state Params params_; /// Internal pointer to first access of tile BytePointer pointer_; UnderlyingPredicates the_predicates; Mask residual_tile_mask; private: /// Computes predicates based on internally tracked per-thread offset. CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { the_predicates.compute_predicates_(extent, is_steady_state); } public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( ///< Precomputed parameters object Params const& params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), the_predicates(extent) { the_predicates.set_predicates(thread_id, threadblock_offset); // update internal pointers Layout layout(params_.stride_); add_pointer_offset(layout(the_predicates.thread_offset_)); } /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } CUTLASS_HOST_DEVICE void set_residual_tile(bool is_residual_tile) { if 
(is_residual_tile) { the_predicates.set_mask(residual_tile_mask); } } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += sizeof_bits<Element>::value * pointer_offset / 8; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const& tile_offset) { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]); pointer_ += Shape::kContiguous * tile_offset[0]; } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]); pointer_ += Shape::kStrided * tile_offset[1]; } } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType* get() const { return reinterpret_cast<AccessType*>(pointer_) + the_predicates.iteration_vector_; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast& operator++() { the_predicates.operator++(); ++the_predicates.iteration_vector_; if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; } the_predicates.iteration_vector_ = 0; ++the_predicates.iteration_contiguous_; if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { pointer_ += params_.inc_contiguous_; return *this; } // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) the_predicates.iteration_contiguous_ = 0; ++the_predicates.iteration_strided_; if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { pointer_ += params_.inc_next_strided_; return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. the_predicates.iteration_strided_ = 0; // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, this // subtraction as well as the subsequent integer addition are both elided by // the compiler. pointer_ -= params_.inc_advance_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast operator++(int) { PredicatedTileAccessIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { the_predicates.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { the_predicates.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { the_predicates.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return the_predicates.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2 /// column-major data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIteratorResidualLast< Shape_, Element_, layout::AffineRank2ColumnMajor, AdvanceRank, ThreadMap_, AccessType_, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRank2ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; // Map to the underlying AffineRankN<2> layout using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::AffineRankN<2>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given an AffineRankN<2> tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){}; }; private: // // Data members // /// Underlying AffineRankN<2> tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( ///< Precomputed parameters object Params const& params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord( threadblock_offset.row(), threadblock_offset.column())) {} /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { 
iterator_.set_residual_tile(enable); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const& tile_offset) { iterator_.add_tile_offset( make_Coord(tile_offset.row(), tile_offset.column())); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast operator++(int) { PredicatedTileAccessIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank-2 /// row-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIteratorResidualLast< Shape_, Element_, layout::AffineRank2RowMajor, AdvanceRank, ThreadMap_, AccessType_, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRank2RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; // Map to the underlying AffineRankN<2> layout using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::AffineRankN<2>, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given an AffineRankN<2> tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){}; }; private: // // Data members // /// Underlying AffineRankN<2> tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( ///< Precomputed parameters object Params const& params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord( threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const& tile_offset) { iterator_.add_tile_offset( make_Coord(tile_offset.column(), tile_offset.row())); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast operator++(int) { PredicatedTileAccessIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIteratorResidualLast for column-major /// interleaved data. It is mapped to the congruous layout. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, int InterleavedK> class PredicatedTileAccessIteratorResidualLast< Shape_, Element_, layout::ColumnMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessType_, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::ColumnMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< layout::PitchLinearShape< Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( /// Precomputed parameters object Params const& params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord( extent.row() * kInterleavedK, extent.column() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.row() * kInterleavedK, threadblock_offset.column() / kInterleavedK)) {} /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const& tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast operator++(int) { PredicatedTileAccessIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIteratorResidualLast for row-major /// interleaved data. // It is mapped to the congruous layout. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, int InterleavedK> class PredicatedTileAccessIteratorResidualLast< Shape_, Element_, layout::RowMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessType_, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::RowMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< layout::PitchLinearShape< Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( /// Precomputed parameters object Params const& params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord( extent.column() * kInterleavedK, extent.row() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.column() * kInterleavedK, threadblock_offset.row() / kInterleavedK)) {} /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const& tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorResidualLast operator++(int) { PredicatedTileAccessIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
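////////////////////////////////////////////////////////////////////////////////

// The block below is a minimal usage sketch and is not part of the original header.
// It assumes "Iterator" is a concrete instantiation of
// PredicatedTileAccessIteratorResidualLast for one of the specializations above, and
// that set_residual_tile(true) marks the tile about to be consumed as the partial
// (residual) k-tile -- which this iterator variant visits last rather than first.
// Only the access-level API shown above is used; the loads a real mainloop would
// issue are elided.

template <typename Iterator>
CUTLASS_DEVICE
void visit_k_tiles_residual_last(
    typename Iterator::Params const& params,          // precomputed host-side state
    typename Iterator::Pointer pointer,               // start of the tensor
    typename Iterator::TensorCoord extent,            // tensor extent
    int thread_id,                                    // participating thread
    typename Iterator::TensorCoord const& tb_offset,  // initial threadblock offset
    int k_tile_count) {                               // number of k-tiles to traverse

  Iterator it(params, pointer, extent, thread_id, tb_offset);

  for (int k = 0; k < k_tile_count; ++k) {
    // Only the final tile is treated as the residual (partially full) one.
    it.set_residual_tile(k == k_tile_count - 1);

    if (it.valid()) {
      auto const* access = it.get();  // guarded AccessType pointer for this access
      (void)access;                   // a real mainloop would issue the load here
    }

    ++it;  // advance to the next tile (see the operator++ contract above)
  }
}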
cutlass/examples/41_fused_multi_head_attention/iterators/predicated_tile_access_iterator_residual_last.h/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/iterators/predicated_tile_access_iterator_residual_last.h", "repo_id": "cutlass", "token_count": 22473 }
11
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator without splitk template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename OutputOp_ ///< Output operator > class FusedBiasActEpilogue { public: using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using OutputOp = OutputOp_; /// Output layout is always row-major using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; public: static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); public: /// Constructor CUTLASS_DEVICE FusedBiasActEpilogue( ){ } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile AccumulatorTile & fused_bias_act_accumlators, OutputTileIterator source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) bool need_bias = output_op.is_source_needed(); if (need_bias) compute_source_needed_(output_op, accumulators, fused_bias_act_accumlators, source_iterator); else compute_source_no_needed_(output_op, accumulators, fused_bias_act_accumlators); } CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile AccumulatorTile & fused_bias_act_accumlators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) compute_source_no_needed_(output_op, accumulators, fused_bias_act_accumlators); } CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile AccumulatorTile & fused_bias_act_accumlators, OutputTileIterator 
source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); AccumulatorFragmentIterator accum_fragment_iterator(accumulators); AccumulatorFragmentIterator fused_bias_act_fragment_iterator(fused_bias_act_accumlators); CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { source_iterator.load(source_fragment); ++source_iterator; typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; typename AccumulatorFragmentIterator::Fragment fused_bias_act_fragment; fused_bias_act_fragment = output_op(accum_fragment, source_fragment); fused_bias_act_fragment_iterator.store(fused_bias_act_fragment); ++fused_bias_act_fragment_iterator; } } CUTLASS_DEVICE void compute_source_no_needed_( OutputOp const &output_op, ///< Output operator AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile AccumulatorTile & fused_bias_act_accumlators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) AccumulatorFragmentIterator accum_fragment_iterator(accumulators); AccumulatorFragmentIterator fused_bias_act_fragment_iterator(fused_bias_act_accumlators); CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < AccumulatorFragmentIterator::kIterations; ++iter) { typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; typename AccumulatorFragmentIterator::Fragment fused_bias_act_fragment; fused_bias_act_fragment = output_op(accum_fragment); fused_bias_act_fragment_iterator.store(fused_bias_act_fragment); ++fused_bias_act_fragment_iterator; } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
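////////////////////////////////////////////////////////////////////////////////

// The block below is a minimal usage sketch and is not part of the original header.
// "Epilogue" is assumed to be a concrete instantiation of FusedBiasActEpilogue whose
// accumulator tile, output operator, and (optional) source iterator were produced by
// the preceding mainloop. Unlike a conventional epilogue, the result stays in the
// register-resident fused_accumulators tile so a following GEMM can consume it
// without a round trip through global memory.

template <typename Epilogue>
CUTLASS_DEVICE
void apply_fused_bias_act(
    typename Epilogue::OutputOp const& output_op,
    typename Epilogue::AccumulatorTile& accumulators,
    typename Epilogue::AccumulatorTile& fused_accumulators,
    typename Epilogue::OutputTileIterator source_iterator) {

  Epilogue epilogue;

  // The overload taking a source iterator checks output_op.is_source_needed() itself
  // and loads the source (e.g. a bias tile) from source_iterator only when required.
  epilogue(output_op, accumulators, fused_accumulators, source_iterator);
}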
cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h", "repo_id": "cutlass", "token_count": 2743 }
12
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import os class replace_fix_impl: def __init__(self, src_dir, dst_dir, cutlass_deps_root): self.src_dir = src_dir self.dst_dir = dst_dir self.cutlass_deps_root = cutlass_deps_root def gen_code(self): for sub_dir in os.walk(self.src_dir): files_in_sub_dir = sub_dir[2] src_dirs = sub_dir[0] output_dirs = self.dst_dir + sub_dir[0][len(self.src_dir):] if not os.path.exists(output_dirs): os.mkdir(output_dirs) for f in files_in_sub_dir: with open(src_dirs +"/" + f, 'r') as current_file: output_lines = [] lines = current_file.readlines() for line in lines: if(len(line) >= len("#include \"cutlass") and line[:len("#include \"cutlass")] == "#include \"cutlass"): new_line = "#include \"" + self.cutlass_deps_root + line[len("#include \""):] # print(new_line) output_lines.append(new_line) else: output_lines.append(line) with open(output_dirs + "/" + f, "w+") as dest_file: dest_file.writelines(output_lines)
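# ------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original script). The directory names and
# the cutlass_deps_root value below are hypothetical. gen_code() mirrors src_dir
# into dst_dir and re-roots every '#include "cutlass...' line, e.g.
#   '#include "cutlass/gemm/gemm.h"'
# becomes
#   '#include "../../../../include/cutlass/gemm/gemm.h"'
# while all other lines are copied through unchanged.

if __name__ == "__main__":
    fixer = replace_fix_impl(
        src_dir="fixed_impl",                      # tree of headers to rewrite (hypothetical)
        dst_dir="fixed_impl_out",                  # destination tree, created per directory walked (hypothetical)
        cutlass_deps_root="../../../../include/")  # prefix spliced in after '#include "'
    fixer.gen_code()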
cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/replace_fix_impl_header.py/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/replace_fix_impl_header.py", "repo_id": "cutlass", "token_count": 1202 }
13
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing elementwise operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/epilogue/collective/detail.hpp" #include "cute/tensor.hpp" #include "cute/numeric/numeric_types.hpp" #include "gather_tensor.hpp" namespace cutlass::epilogue::collective { /// Applies an element wise operation to all elements within the fragment /// and scatter-writes them out to destination storage. /// GatherC and ScatterD are types of user-defined functions that apply the /// transoformation of the strided coordinate (e.g. through an index array). template < class StrideC_, class StrideD_, class ThreadEpilogueOp_, class EpilogueSchedule_, class GatherC_, class ScatterD_ > class EpilogueGatherScatter { public: // // Type Aliases // using EpilogueSchedule = EpilogueSchedule_; // derived types of output thread level operator using ThreadEpilogueOp = ThreadEpilogueOp_; using ElementOutput = typename ThreadEpilogueOp::ElementOutput; using ElementAccumulator = typename ThreadEpilogueOp::ElementAccumulator; using ElementCompute = typename ThreadEpilogueOp::ElementCompute; using ElementScalar = ElementCompute; using ElementC = typename ThreadEpilogueOp::ElementC; using StrideC = StrideC_; using ElementD = typename ThreadEpilogueOp::ElementD; using StrideD = StrideD_; // Every epilogue needs these two GmemTiledCopy{C,D} aliases. // If you don't know what they should be, just use void. 
using GmemTiledCopyC = void; using GmemTiledCopyD = void; using GatherC = GatherC_; using ScatterD = ScatterD_; static const int kOutputAlignment = ThreadEpilogueOp::kCount; using AlignmentType = typename cute::uint_bit<sizeof_bits<ElementOutput>::value * kOutputAlignment>::type; static_assert(cute::rank(StrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]"); static_assert(cute::rank(StrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]"); struct SharedStorage { }; // Host side epilogue arguments struct Arguments { typename ThreadEpilogueOp::Params thread_params{}; ElementC const* ptr_C = nullptr; StrideC dC{}; ElementD* ptr_D = nullptr; StrideD dD{}; GatherC gather_C{}; ScatterD scatter_D{}; }; // Device side epilogue params using Params = Arguments; // // Methods // template <class ProblemShape> static constexpr Params to_underlying_arguments( [[maybe_unused]] ProblemShape const& _, Arguments const& args, [[maybe_unused]] void* workspace) { return args; } template<class ProblemShape> CUTLASS_HOST_DEVICE static bool can_implement( [[maybe_unused]] ProblemShape const& problem_shape, [[maybe_unused]] Arguments const& args) { return true; } CUTLASS_HOST_DEVICE EpilogueGatherScatter(Params const& params_) : params(params_) { } template< class ProblemShapeMNKL, class BlockShapeMNK, class BlockCoordMNKL, class FrgEngine, class FrgLayout, class TiledMma, class ResidueMNK > CUTLASS_DEVICE void operator()( ProblemShapeMNKL problem_shape_mnkl, BlockShapeMNK blk_shape_MNK, BlockCoordMNKL blk_coord_mnkl, cute::Tensor<FrgEngine, FrgLayout> const& accumulators, TiledMma tiled_mma, ResidueMNK residue_mnk, int thread_idx, char* smem_buf) { using namespace cute; using X = Underscore; static_assert(cute::rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4"); static_assert(is_static<BlockShapeMNK>::value, "ThreadBlock tile shape must be static"); static_assert(cute::rank(BlockShapeMNK{}) == 3, "BlockShapeMNK must be rank 3"); static_assert(cute::rank(BlockCoordMNKL{}) == 4, "BlockCoordMNKL must be rank 3"); (void) smem_buf; ThreadEpilogueOp epilogue_op{params.thread_params}; // Separate out problem shape for convenience auto M = get<0>(problem_shape_mnkl); auto N = get<1>(problem_shape_mnkl); auto L = get<3>(problem_shape_mnkl); auto stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC); auto stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD); // Represent the full output tensor Tensor mC_mnl = make_gather_tensor(make_gmem_ptr(params.ptr_C), make_shape(M,N,L), stride_c, params.gather_C); // (m,n,l) Tensor mD_mnl = make_gather_tensor(make_gmem_ptr(params.ptr_D), make_shape(M,N,L), stride_d, params.scatter_D); // (m,n,l) Tensor gC_mnl = local_tile(mC_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l) Tensor gD_mnl = local_tile(mD_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l) // Slice to get the tile this CTA is responsible for auto [m_coord, n_coord, k_coord, l_coord] = blk_coord_mnkl; Tensor gC = gC_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N) Tensor gD = gD_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N) // Partition source and destination tiles to match the accumulator partitioning auto thr_mma = tiled_mma.get_thread_slice(thread_idx); Tensor tCgD = thr_mma.partition_C(gD); // (VEC,THR_M,THR_N) Tensor tCgC = thr_mma.partition_C(gC); // (VEC,THR_M,THR_N) static_assert(is_static<FrgLayout>::value, "Accumulator layout must be static"); CUTE_STATIC_ASSERT_V(size(tCgC) == size(tCgD), "Source and 
destination must have the same number of elements."); CUTE_STATIC_ASSERT_V(size(tCgD) == size(accumulators), "Accumulator count must have the same destination element count."); // Make an identity coordinate tensor for predicating our output MN tile auto cD = make_identity_tensor(make_shape(unwrap(shape<0>(gD)), unwrap(shape<1>(gD)))); Tensor tCcD = thr_mma.partition_C(cD); // source is needed if (epilogue_op.is_source_needed()) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(accumulators); ++i) { if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) { tCgD(i) = epilogue_op(accumulators(i), tCgC(i)); } } } // source is not needed, avoid load else { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(accumulators); ++i) { if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) { tCgD(i) = epilogue_op(accumulators(i)); } } } } private: Params params; }; } // namespace cutlass::epilogue::collective
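////////////////////////////////////////////////////////////////////////////////////////////////////

// The struct below is a hypothetical sketch and is not part of the original header
// (the example ships its own helpers in gather_tensor.hpp). It illustrates the only
// property the epilogue above relies on for GatherC / ScatterD: a device-callable
// functor that maps a logical coordinate to a stored coordinate, which
// make_gather_tensor() folds into the C / D tensor addressing. An instance is passed
// through Arguments::gather_C / Arguments::scatter_D.

struct RowIndexMapSketch {
  int const* indices = nullptr;   // device pointer to the per-row index array

  CUTE_HOST_DEVICE
  int operator()(int logical_idx) const {
    return indices[logical_idx];  // logical row -> row actually stored in memory
  }
};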
cutlass/examples/52_hopper_gather_scatter_fusion/scatter_epilogue.hpp/0
{ "file_path": "cutlass/examples/52_hopper_gather_scatter_fusion/scatter_epilogue.hpp", "repo_id": "cutlass", "token_count": 3297 }
14
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <cstdlib> #include <cstdio> #include <cassert> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "cutlass/util/helper_cuda.hpp" template <class ProblemShape, class CtaTiler, class TA, class AStride, class ASmemLayout, class TiledCopyA, class TB, class BStride, class BSmemLayout, class TiledCopyB, class TC, class CStride, class CSmemLayout, class TiledMma, class Alpha, class Beta> __global__ static __launch_bounds__(decltype(size(TiledMma{}))::value) void gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler, TA const* A, AStride dA, ASmemLayout sA_layout, TiledCopyA copy_a, TB const* B, BStride dB, BSmemLayout sB_layout, TiledCopyB copy_b, TC * C, CStride dC, CSmemLayout , TiledMma mma, Alpha alpha, Beta beta) { using namespace cute; // Preconditions CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K) CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K) CUTE_STATIC_ASSERT_V(size(copy_a) == size(mma)); // NumThreads CUTE_STATIC_ASSERT_V(size(copy_b) == size(mma)); // NumThreads static_assert(is_static<ASmemLayout>::value); static_assert(is_static<BSmemLayout>::value); static_assert(is_static<CSmemLayout>::value); CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == 
size<2>(cta_tiler)); // BLK_K CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN // // Full and Tiled Tensors // // Represent the full tensors Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K) Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K) Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N) // Get the appropriate blocks for this thread block auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k) Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k) Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k) Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N) // Shared memory buffers __shared__ TA smemA[cosize_v<ASmemLayout>]; __shared__ TB smemB[cosize_v<BSmemLayout>]; Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K,PIPE) Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K,PIPE) // // Partition the copying of A and B tiles across the threads // ThrCopy thr_copy_a = copy_a.get_slice(threadIdx.x); Tensor tAgA = thr_copy_a.partition_S(gA); // (CPY,CPY_M,CPY_K,k) Tensor tAsA = thr_copy_a.partition_D(sA); // (CPY,CPY_M,CPY_K,PIPE) ThrCopy thr_copy_b = copy_b.get_slice(threadIdx.x); Tensor tBgB = thr_copy_b.partition_S(gB); // (CPY,CPY_N,CPY_K,k) Tensor tBsB = thr_copy_b.partition_D(sB); // (CPY,CPY_N,CPY_K,PIPE) CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // CPY_M CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tAsA)); // CPY_K CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // CPY_N CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBsB)); // CPY_K // // PREFETCH // auto K_PIPE_MAX = size<3>(tAsA); // Total count of tiles int k_tile_count = size<3>(tAgA); // Current tile index in gmem to read from int k_tile_next = 0; // Start async loads for all pipes but the last CUTE_UNROLL for (int k_pipe = 0; k_pipe < K_PIPE_MAX-1; ++k_pipe) { copy(copy_a, tAgA(_,_,_,k_tile_next), tAsA(_,_,_,k_pipe)); copy(copy_b, tBgB(_,_,_,k_tile_next), tBsB(_,_,_,k_pipe)); cp_async_fence(); --k_tile_count; if (k_tile_count > 0) { ++k_tile_next; } } // // Define A/B partitioning and C accumulators // ThrMMA thr_mma = mma.get_slice(threadIdx.x); Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE) Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE) Tensor tCgC = thr_mma.partition_C(gC); // (MMA,MMA_M,MMA_N) // Allocate registers for pipelining Tensor tCrA = thr_mma.make_fragment_A(tCsA(_,_,_,0)); // (MMA,MMA_M,MMA_K) Tensor tCrB = thr_mma.make_fragment_B(tCsB(_,_,_,0)); // (MMA,MMA_N,MMA_K) // Allocate the accumulators -- same size as the projected data Tensor tCrC = thr_mma.make_fragment_C(tCgC); // (MMA,MMA_M,MMA_N) CUTE_STATIC_ASSERT_V( shape(tCrA) == shape(tCsA)); // (MMA,MMA_M,MMA_K) CUTE_STATIC_ASSERT_V( shape(tCrB) == shape(tCsB)); // (MMA,MMA_N,MMA_K) CUTE_STATIC_ASSERT_V( shape(tCrC) == shape(tCgC)); // (MMA,MMA_M,MMA_N) CUTE_STATIC_ASSERT_V(size<1>(tCgC) == size<1>(tCsA)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(tCgC) == size<1>(tCsB)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // MMA_K // Clear the accumulators clear(tCrC); #if 0 if(thread0()) { print(" mA : "); print( mA); print("\n"); print(" gA : "); 
print( gA); print("\n"); print(" sA : "); print( sA); print("\n"); print("tAgA : "); print(tAgA); print("\n"); print("tAsA : "); print(tAsA); print("\n"); } #endif #if 0 if(thread0()) { print(" mB : "); print( mB); print("\n"); print(" gB : "); print( gB); print("\n"); print(" sB : "); print( sB); print("\n"); print("tBgB : "); print(tBgB); print("\n"); print("tBsB : "); print(tBsB); print("\n"); } #endif #if 0 if(thread0()) { print(" mC : "); print( mC); print("\n"); print(" gC : "); print( gC); print("\n"); print("tCsA : "); print(tCsA); print("\n"); print("tCsB : "); print(tCsB); print("\n"); print("tCgC : "); print(tCgC); print("\n"); print("tCrA : "); print(tCrA); print("\n"); print("tCrB : "); print(tCrB); print("\n"); print("tCrC : "); print(tCrC); print("\n"); } #endif #if 1 // Current pipe index in smem to read from int smem_pipe_read = 0; // Current pipe index in smem to write to int smem_pipe_write = K_PIPE_MAX-1; // Pipe slice Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read); Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read); // Size of the register pipeline auto K_BLOCK_MAX = size<2>(tCrA); // PREFETCH register pipeline if (K_BLOCK_MAX > 1) { // Wait until our first prefetched tile is loaded in cp_async_wait<K_PIPE_MAX-2>(); __syncthreads(); // Prefetch the first rmem from the first k-tile copy(tCsA_p(_,_,Int<0>{}), tCrA(_,_,Int<0>{})); copy(tCsB_p(_,_,Int<0>{}), tCrB(_,_,Int<0>{})); } // // PIPELINED MAIN LOOP // TUTORIAL: Example of a gemm loop that pipelines shared memory using SM80's cp.async instructions // and explicit pipelines in shared memory. // Data is read from global(k_tile_next) to shared(smem_pipe_write). // Data is read from shared(smem_pipe_read) to registers(k_block_next). // Data is computed on registers(b_block). // // This allows all copies and compute to overlap: // Copy from gmem->smem can overlap with copies from smem->rmem and compute on rmem. // Copy from smem->rmem can overlap with compute on rmem. // CUTE_NO_UNROLL while (k_tile_count > -(K_PIPE_MAX-1)) { CUTE_UNROLL for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) { if (k_block == K_BLOCK_MAX - 1) { // Slice the smem_pipe_read smem tCsA_p = tCsA(_,_,_,smem_pipe_read); tCsB_p = tCsB(_,_,_,smem_pipe_read); // Commit the smem for smem_pipe_read cp_async_wait<K_PIPE_MAX-2>(); __syncthreads(); } // Load A, B shmem->regs for k_block+1 auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static copy(tCsA_p(_,_,k_block_next), tCrA(_,_,k_block_next)); copy(tCsB_p(_,_,k_block_next), tCrB(_,_,k_block_next)); // Copy gmem to smem before computing gemm on each k-pipe if (k_block == 0) { copy(copy_a, tAgA(_,_,_,k_tile_next), tAsA(_,_,_,smem_pipe_write)); copy(copy_b, tBgB(_,_,_,k_tile_next), tBsB(_,_,_,smem_pipe_write)); cp_async_fence(); // Advance the gmem tile --k_tile_count; if (k_tile_count > 0) { ++k_tile_next; } // Advance the smem pipe smem_pipe_write = smem_pipe_read; ++smem_pipe_read; smem_pipe_read = (smem_pipe_read == K_PIPE_MAX) ? 
0 : smem_pipe_read; } // Thread-level register gemm for k_block gemm(mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC); } } #endif // // Epilogue // axpby(alpha, tCrC, beta, tCgC); } // Setup params for a NT GEMM template <class TA, class TB, class TC, class Alpha, class Beta> void gemm_nt(int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { using namespace cute; // Define shapes (dynamic) auto M = int(m); auto N = int(n); auto K = int(k); auto prob_shape = make_shape(M, N, K); // (M, N, K) // Define NT strides (mixed) auto dA = make_stride(Int<1>{}, ldA); // (dM, dK) auto dB = make_stride(Int<1>{}, ldB); // (dN, dK) auto dC = make_stride(Int<1>{}, ldC); // (dM, dN) // Define CTA tile sizes (static) auto bM = Int<128>{}; auto bN = Int<128>{}; auto bK = Int< 8>{}; auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K) auto bP = Int<3>{}; // Pipeline // Define the smem layouts (static) auto sA = make_layout(make_shape(bM, bK, bP)); // (m,k,p) -> smem_idx; m-major auto sB = make_layout(make_shape(bN, bK, bP)); // (n,k,p) -> smem_idx; n-major auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major // Define the thread layouts (static) TiledCopy copyA = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, TA>{}, Layout<Shape<_32,_8>>{}, // Thr layout 32x8 m-major Layout<Shape< _4,_1>>{});// Val layout 4x1 m-major TiledCopy copyB = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, TB>{}, Layout<Shape<_32,_8>>{}, // Thr layout 32x8 n-major Layout<Shape< _4,_1>>{});// Val layout 4x1 n-major TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{}, Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA #if 0 print(copyA); print(copyB); print(mmaC); #endif #if 0 print_latex(copyA); print_latex(copyB); print_latex(mmaC); #endif dim3 dimBlock(size(mmaC)); dim3 dimGrid(size(ceil_div(M, bM)), size(ceil_div(N, bN))); gemm_device<<<dimGrid, dimBlock, 0, stream>>> (prob_shape, cta_tiler, A, dA, sA, copyA, B, dB, sB, copyB, C, dC, sC, mmaC, alpha, beta); } // Setup params for a NT GEMM template <class TA, class TB, class TC, class Alpha, class Beta> void gemm_tn(int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { using namespace cute; // Define shapes (dynamic) auto M = int(m); auto N = int(n); auto K = int(k); auto prob_shape = make_shape(M, N, K); // (M, N, K) // Define TN strides (mixed) auto dA = make_stride(ldA, Int<1>{}); // (dM, dK) auto dB = make_stride(ldB, Int<1>{}); // (dN, dK) auto dC = make_stride(Int<1>{}, ldC); // (dM, dN) // Define CTA tile sizes (static) auto bM = Int<128>{}; auto bN = Int<128>{}; auto bK = Int< 8>{}; auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K) auto bP = Int<3>{}; // Pipeline // Define the smem layouts (static) auto sA_atom = make_layout(make_shape ( bM, bK), make_stride(Int<1>{}, bM+Int<1>{})); // (m,k) -> smem_idx; padded m-major auto sB_atom = make_layout(make_shape ( bN, bK), make_stride(Int<1>{}, bN+Int<1>{})); // (n,k) -> smem_idx; padded n-major auto sA = tile_to_shape(sA_atom, make_shape(bM, bK, bP)); auto sB = tile_to_shape(sA_atom, make_shape(bN, bK, bP)); auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx // Define the thread layouts (static) TiledCopy copyA = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<TA>, TA>{}, Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major Layout<Shape< _1,_1>>{}); // Val 
layout 1x1 TiledCopy copyB = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<TB>, TB>{}, Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major Layout<Shape< _1,_1>>{}); // Val layout 1x1 TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{}, Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA #if 0 print(copyA); print(copyB); print(mmaC); #endif #if 0 print_latex(copyA); print_latex(copyB); print_latex(mmaC); #endif dim3 dimBlock(size(mmaC)); dim3 dimGrid(size(ceil_div(M, bM)), size(ceil_div(N, bN))); gemm_device<<<dimGrid, dimBlock, 0, stream>>> (prob_shape, cta_tiler, A, dA, sA, copyA, B, dB, sB, copyB, C, dC, sC, mmaC, alpha, beta); } template <class TA, class TB, class TC, class Alpha, class Beta> void gemm(char transA, char transB, int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { if (transA == 'N' && transB == 'T') { return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream); } else if (transA == 'T' && transB == 'N') { return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream); } assert(false && "Not implemented"); } int main(int argc, char** argv) { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major < 8) { std::cout << "This example requires an Ampere GPU or newer (CC >= 80)" << std::endl; // Return 0 so tests pass if run on unsupported architectures or CUDA Toolkits. return 0; } int m = 5120; if (argc >= 2) sscanf(argv[1], "%d", &m); int n = 5120; if (argc >= 3) sscanf(argv[2], "%d", &n); int k = 4096; if (argc >= 4) sscanf(argv[3], "%d", &k); char transA = 'N'; if (argc >= 5) sscanf(argv[4], "%c", &transA); char transB = 'T'; if (argc >= 6) sscanf(argv[5], "%c", &transB); using TA = float; using TB = float; using TC = float; using TI = float; TI alpha = 1.0; TI beta = 0.0; std::cout << "M = " << m << std::endl; std::cout << "N = " << n << std::endl; std::cout << "K = " << k << std::endl; std::cout << "C = A^" << transA << " B^" << transB << std::endl; thrust::host_vector<TA> h_A(m*k); thrust::host_vector<TB> h_B(n*k); thrust::host_vector<TC> h_C(m*n); for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 ); for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 ); for (int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1); thrust::device_vector<TA> d_A = h_A; thrust::device_vector<TB> d_B = h_B; thrust::device_vector<TC> d_C = h_C; double gflops = (2.0*m*n*k) * 1e-9; const int timing_iterations = 100; GPU_Clock timer; int ldA = 0, ldB = 0, ldC = m; if (transA == 'N') { ldA = m; } else if (transA == 'T') { ldA = k; } else { assert(false); } if (transB == 'N') { ldB = k; } else if (transB == 'T') { ldB = n; } else { assert(false); } // Run once d_C = h_C; gemm(transA, transB, m, n, k, alpha, d_A.data().get(), ldA, d_B.data().get(), ldB, beta, d_C.data().get(), ldC); CUTE_CHECK_LAST(); thrust::host_vector<TC> cute_result = d_C; // Timing iterations timer.start(); for (int i = 0; i < timing_iterations; ++i) { gemm(transA, transB, m, n, k, alpha, d_A.data().get(), ldA, d_B.data().get(), ldB, beta, d_C.data().get(), ldC); } double cute_time = timer.seconds() / timing_iterations; CUTE_CHECK_LAST(); printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000); return 0; }
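////////////////////////////////////////////////////////////////////////////////

// The function below is a host-side correctness check sketched for this tutorial; it
// is not part of the original file, and in practice it would be defined (or
// forward-declared) above main(). It covers only the default transA='N', transB='T'
// case (gemm_nt), recomputes C = alpha * A * B^T + beta * C with a naive triple loop
// using the same column-major leading dimensions main() sets up (ldA == m, ldB == n,
// ldC == m), and returns the largest absolute difference against the GPU result held
// in cute_result.

template <class TA, class TB, class TC, class TI>
double max_abs_diff_nt(int m, int n, int k, TI alpha, TI beta,
                       thrust::host_vector<TA> const& A, int ldA,    // (M,K), m-major
                       thrust::host_vector<TB> const& B, int ldB,    // (N,K), n-major
                       thrust::host_vector<TC> const& C_in, int ldC, // C before the GEMM
                       thrust::host_vector<TC> const& C_out)         // C after the GEMM
{
  double worst = 0.0;
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      double acc = 0.0;
      for (int p = 0; p < k; ++p) {
        acc += double(A[i + p*ldA]) * double(B[j + p*ldB]);
      }
      double ref  = double(alpha) * acc + double(beta) * double(C_in[i + j*ldC]);
      double diff = ref - double(C_out[i + j*ldC]);
      if (diff < 0) diff = -diff;
      if (diff > worst) worst = diff;
    }
  }
  return worst;
}

// Possible use after the first gemm() call in main() (hypothetical):
//   double err = max_abs_diff_nt(m, n, k, alpha, beta, h_A, ldA, h_B, ldB, h_C, ldC, cute_result);
//   printf("max |ref - cute| = %e\n", err);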
cutlass/examples/cute/tutorial/sgemm_sm80.cu/0
{ "file_path": "cutlass/examples/cute/tutorial/sgemm_sm80.cu", "repo_id": "cutlass", "token_count": 9827 }
15
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/mma.hpp> #include <cute/numeric/complex.hpp> // Config #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)) # define CUTE_ARCH_MMA_SM80_ENABLED #if (__CUDA_ARCH__ <= 900) #define CUTE_ARCH_MMA_B1_AND_SM80_ENABLED #endif #if (__CUDA_ARCH__ <= 890) #define CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED #endif #endif namespace cute { //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x8 TN struct SM80_16x8x8_F16F16F16F16_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 " "{%0, %1}," "{%2, %3}," "{%4}," "{%5, %6};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F16F16F16F16_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_F16F16F16F16_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( 
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 " "{%0, %1}," "{%2, %3, %4, %5}," "{%6, %7}," "{%8, %9};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F16F16F16F16_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x8 TN struct SM80_16x8x8_F32F16F16F32_TN { using DRegisters = float[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(float & d0, float & d1, float & d2, float & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, float const & c0, float const & c1, float const & c2, float const & c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) : "r"(a0), "r"(a1), "r"(b0), "f"(c0), "f"(c1), "f"(c2), "f"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32F16F16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_F32F16F16F32_TN { using DRegisters = float[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(float & d0, float & d1, float & d2, float & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, float const & c0, float const & c1, float const & c2, float const & c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "f"(c0), "f"(c1), "f"(c2), "f"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F32F16F16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x8 TN struct SM80_16x8x8_F32BF16BF16F32_TN { using DRegisters = float[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(float & d0, float & d1, float & d2, float & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, float const & c0, float const & c1, float const & c2, float const & c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k8.row.col.f32.bf16.bf16.f32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) : "r"(a0), "r"(a1), "r"(b0), "f"(c0), "f"(c1), "f"(c2), "f"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32BF16BF16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_F32BF16BF16F32_TN { using DRegisters = float[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(float & d0, float & d1, float & d2, float & d3, uint32_t const& a0, uint32_t const& a1, uint32_t 
const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, float const & c0, float const & c1, float const & c2, float const & c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "f"(c0), "f"(c1), "f"(c2), "f"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F32BF16BF16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x4 TN struct SM80_16x8x4_F32TF32TF32F32_TN { using DRegisters = float[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(float & d0, float & d1, float & d2, float & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, float const & c0, float const & c1, float const & c2, float const & c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k4.row.col.f32.tf32.tf32.f32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) : "r"(a0), "r"(a1), "r"(b0), "f"(c0), "f"(c1), "f"(c2), "f"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x4_F32TF32TF32F32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x8 TN struct SM80_16x8x8_F32TF32TF32F32_TN { using DRegisters = float[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(float & d0, float & d1, float & d2, float & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, float const & c0, float const & c1, float const & c2, float const & c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "f"(c0), "f"(c1), "f"(c2), "f"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32TF32TF32F32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x4 TN struct SM80_8x8x4_F64F64F64F64_TN { using DRegisters = double[2]; using ARegisters = double[1]; using BRegisters = double[1]; using CRegisters = double[2]; CUTE_HOST_DEVICE static void fma(double & d0, double & d1, double const& a0, double const& b0, double const& c0, double const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k4.row.col.f64.f64.f64.f64 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=d"(d0), "=d"(d1) : "d"(a0), "d"(b0), "d"(c0), "d"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x4_F64F64F64F64_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; // MMA 8x8x4 TN with Planar Complex multiplication struct SM80_8x8x4_C64C64C64C64_TN { using DRegisters = complex<double>[2]; using ARegisters = complex<double>[1]; using BRegisters = complex<double>[1]; using CRegisters = complex<double>[2]; CUTE_HOST_DEVICE static void fma(complex<double> & d0, 
complex<double> & d1, complex<double> const& a0, complex<double> const& b0, complex<double> const& c0, complex<double> const& c1) { // Because thrust::complex does not provide a mutable ref double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0]; double& id0 = reinterpret_cast<double(&)[2]>(d0)[1]; double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0]; double& id1 = reinterpret_cast<double(&)[2]>(d1)[1]; // d.real() = a.real() * b.real() + c.real(); SM80_8x8x4_F64F64F64F64_TN::fma( rd0, rd1, a0.real(), b0.real(), c0.real(), c1.real()); // d.imag() = a.imag() * b.real() + c.imag(); SM80_8x8x4_F64F64F64F64_TN::fma( id0, id1, a0.imag(), b0.real(), c0.imag(), c1.imag()); // d.real() = -a.imag() * b.imag() + d.real(); SM80_8x8x4_F64F64F64F64_TN::fma( rd0, rd1, -a0.imag(), b0.imag(), d0.real(), d1.real()); // d.imag() = a.real() * b.imag() + d.imag(); SM80_8x8x4_F64F64F64F64_TN::fma( id0, id1, a0.real(), b0.imag(), d0.imag(), d1.imag()); } }; // MMA 8x8x4 TN with Gaussian Complex multiplication: // (a + bi)*(c + di) // yields // t0 += a*c // t1 += b*d // t2 += (a+b)*(c+d) // then // re = t0 - t1 // im = t2 - t0 - t1 struct SM80_8x8x4_GC64C64C64GC64_TN { struct GaussComplex { double t0, t1, t2; CUTE_HOST_DEVICE //constexpr operator complex<double>() const { return complex<double>(t0 - t1, t2 - t0 - t1); } CUTE_HOST_DEVICE friend //constexpr complex<double> operator*(GaussComplex const& a, complex<double> const& b) { return static_cast<complex<double>>(a) * b; } CUTE_HOST_DEVICE friend //constexpr complex<double> operator*(complex<double> const& a, GaussComplex const& b) { return b * a; } CUTE_HOST_DEVICE friend //constexpr complex<double> operator+(GaussComplex const& a, complex<double> const& b) { return static_cast<complex<double>>(a) + b; } CUTE_HOST_DEVICE friend //constexpr complex<double> operator+(complex<double> const& a, GaussComplex const& b) { return b + a; } }; using DRegisters = GaussComplex[2]; using ARegisters = complex<double>[1]; using BRegisters = complex<double>[1]; using CRegisters = GaussComplex[2]; CUTE_HOST_DEVICE static void fma(GaussComplex & d0, GaussComplex & d1, complex<double> const& a0, complex<double> const& b0, GaussComplex const& c0, GaussComplex const& c1) { SM80_8x8x4_F64F64F64F64_TN::fma(d0.t0, d1.t0, a0.real(), b0.real(), c0.t0, c1.t0); SM80_8x8x4_F64F64F64F64_TN::fma(d0.t1, d1.t1, a0.imag(), b0.imag(), c0.t1, c1.t1); SM80_8x8x4_F64F64F64F64_TN::fma(d0.t2, d1.t2, a0.real() + a0.imag(), b0.real() + b0.imag(), c0.t2, c1.t2); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x16 TN struct SM80_8x8x16_S32S8S8S32_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x16 TN struct SM80_8x8x16_S32S8S8S32_TN_SATURATE { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; 
CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_S32S8S8S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_S32S8S8S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32S8S8S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32S8S8S32_TN_SATURATE { 
using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x16 TN struct SM80_8x8x16_S32S8U8S32_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x16 TN struct SM80_8x8x16_S32S8U8S32_TN_SATURATE { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32.satfinite " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_S32S8U8S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_S32S8U8S32_TN_SATURATE { using DRegisters = uint32_t[4]; 
using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32S8U8S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32S8U8S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x16 TN struct SM80_8x8x16_S32U8S8S32_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8S8S32_TN without 
CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x16 TN struct SM80_8x8x16_S32U8S8S32_TN_SATURATE { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32.satfinite " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_S32U8S8S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_S32U8S8S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32U8S8S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), 
"r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32U8S8S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x16 TN struct SM80_8x8x16_S32U8U8S32_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x16 TN struct SM80_8x8x16_S32U8U8S32_TN_SATURATE { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32.satfinite " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_S32U8U8S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), 
"r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM80_16x8x16_S32U8U8S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32U8U8S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32U8U8S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x32 TN struct SM80_8x8x32_S32S4S4S32_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t 
const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x32 TN struct SM80_8x8x32_S32S4S4S32_TN_SATURATE { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32.satfinite " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32S4S4S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.s4.s4.s32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32S4S4S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.s4.s4.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x64 TN struct SM80_16x8x64_S32S4S4S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& 
c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x64 TN struct SM80_16x8x64_S32S4S4S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x32 TN struct SM80_8x8x32_S32S4U4S32_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x32 TN struct SM80_8x8x32_S32S4U4S32_TN_SATURATE { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32.satfinite " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32S4U4S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t 
const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.s4.u4.s32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32S4U4S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.s4.u4.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x64 TN struct SM80_16x8x64_S32S4U4S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x64 TN struct SM80_16x8x64_S32S4U4S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; 
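////////////////////////////////////////////////////////////////////////////////////////////////////

// Exposition-only sketch (not part of the original mma_sm80.hpp): the *_SATURATE atoms above map to
// the ".satfinite" form of mma.sync, which clamps the signed 32-bit result to the representable
// int32 range rather than letting the accumulation overflow. A scalar model of that clamping,
// assuming the dot product has been accumulated in a wider integer first; the helper name is ours.
CUTE_HOST_DEVICE constexpr
int32_t
satfinite_s32_model(int64_t widened_sum)
{
  int64_t const lo = -2147483647LL - 1;   // INT32_MIN, spelled out to avoid extra includes
  int64_t const hi =  2147483647LL;       // INT32_MAX
  return static_cast<int32_t>(widened_sum < lo ? lo : (widened_sum > hi ? hi : widened_sum));
}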
//////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x32 TN struct SM80_8x8x32_S32U4S4S32_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x32 TN struct SM80_8x8x32_S32U4S4S32_TN_SATURATE { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32.satfinite " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32U4S4S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.u4.s4.s32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32U4S4S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.u4.s4.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x64 TN struct 
SM80_16x8x64_S32U4S4S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x64 TN struct SM80_16x8x64_S32U4S4S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x32 TN struct SM80_8x8x32_S32U4U4S32_TN { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32 " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x32 TN struct SM80_8x8x32_S32U4U4S32_TN_SATURATE { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32.satfinite " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; 
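////////////////////////////////////////////////////////////////////////////////////////////////////

// Exposition-only helper (not part of the original mma_sm80.hpp): the int4 atoms above take uint32_t
// operand registers that each carry eight packed 4-bit values (the int8 atoms carry four 8-bit
// values per register). A sketch of packing eight signed 4-bit integers into one such register,
// assuming element i occupies bits [4*i+3 : 4*i]; the authoritative per-thread fragment layout is
// the one specified by the PTX ISA for mma.sync, and the helper name is ours.
CUTE_HOST_DEVICE constexpr
uint32_t
pack_eight_s4_model(int const (&values)[8])
{
  uint32_t packed = 0;
  for (int i = 0; i < 8; ++i) {
    packed |= (static_cast<uint32_t>(values[i]) & 0xFu) << (4 * i);  // keep the low 4 bits of each value
  }
  return packed;
}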
//////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32U4U4S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.u4.u4.s32 " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x32 TN struct SM80_16x8x32_S32U4U4S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k32.row.col.s32.u4.u4.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x64 TN struct SM80_16x8x64_S32U4U4S32_TN { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x64 TN struct SM80_16x8x64_S32U4U4S32_TN_SATURATE { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_SM80_ENABLED) asm volatile( 
"mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 8x8x128 TN struct SM80_8x8x128_S32U1U1S32_TN_XORPOPC { using DRegisters = uint32_t[2]; using ARegisters = uint32_t[1]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t const& a0, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1) { #if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED) asm volatile( "mma.sync.aligned.m8n8k128.row.col.s32.b1.b1.s32.xor.popc " "{%0, %1}," "{%2}," "{%3}," "{%4, %5};\n" : "=r"(d0), "=r"(d1) : "r"(a0), "r"(b0), "r"(c0), "r"(c1)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x128_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x128 TN struct SM80_16x8x128_S32U1U1S32_TN_XORPOPC { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& b0, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k128.row.col.s32.b1.b1.s32.xor.popc " "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(b0), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x128_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x256 TN struct SM80_16x8x256_S32U1U1S32_TN_XORPOPC { using DRegisters = uint32_t[4]; using ARegisters = uint32_t[4]; using BRegisters = uint32_t[2]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint32_t const& b0, uint32_t const& b1, uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) { #if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED) asm volatile( "mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.xor.popc " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(b0), "r"(b1), "r"(c0), "r"(c1), "r"(c2), "r"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x256_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cute
// End of cutlass/include/cute/arch/mma_sm80.hpp
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/underscore.hpp> #include <cute/int_tuple.hpp> #include <cute/stride.hpp> #include <cute/numeric/arithmetic_tuple.hpp> #include <cute/numeric/integral_ratio.hpp> #include <cute/numeric/integral_constant.hpp> namespace cute { // Aliases template <class... Shapes> using Shape = cute::tuple<Shapes...>; template <class... Strides> using Stride = cute::tuple<Strides...>; template <class... Strides> using Step = cute::tuple<Strides...>; template <class... Coords> using Coord = cute::tuple<Coords...>; template <class... Layouts> using Tile = cute::tuple<Layouts...>; template <class... Ts> CUTE_HOST_DEVICE constexpr Shape<Ts...> make_shape(Ts const&... t) { return {t...}; } template <class... Ts> CUTE_HOST_DEVICE constexpr Stride<Ts...> make_stride(Ts const&... t) { return {t...}; } template <class... Ts> CUTE_HOST_DEVICE constexpr Step<Ts...> make_step(Ts const&... t) { return {t...}; } template <class... Ts> CUTE_HOST_DEVICE constexpr Coord<Ts...> make_coord(Ts const&... t) { return {t...}; } template <class... Ts> CUTE_HOST_DEVICE constexpr Tile<Ts...> make_tile(Ts const&... t) { return {t...}; } // // Layout // template <class Shape, class Stride = LayoutLeft::Apply<Shape> > struct Layout : private cute::tuple<Shape, Stride> // EBO for static layouts { // Expensive in compilation time... 
//static_assert(is_congruent<Shape, Stride>::value, "Shape and Stride must be congruent"); // NOTE: This defaults static Shapes/Strides correctly, but not dynamic CUTE_HOST_DEVICE constexpr Layout(Shape const& shape = {}, Stride const& stride = {}) : cute::tuple<Shape, Stride>(shape, stride) {} // // Accessors // static constexpr int rank = rank_v<Shape>; CUTE_HOST_DEVICE constexpr decltype(auto) layout() { return *this; } CUTE_HOST_DEVICE constexpr decltype(auto) layout() const { return *this; } template <int... I> CUTE_HOST_DEVICE constexpr decltype(auto) shape() { return get<0,I...>(static_cast<cute::tuple<Shape, Stride>&>(*this)); } template <int... I> CUTE_HOST_DEVICE constexpr decltype(auto) shape() const { return get<0,I...>(static_cast<cute::tuple<Shape, Stride> const&>(*this)); } template <int... I> CUTE_HOST_DEVICE constexpr decltype(auto) stride() { return get<1,I...>(static_cast<cute::tuple<Shape, Stride>&>(*this)); } template <int... I> CUTE_HOST_DEVICE constexpr decltype(auto) stride() const { return get<1,I...>(static_cast<cute::tuple<Shape, Stride> const&>(*this)); } // // Mappings // // Map a logical coordinate to a linear index (Coord has no Underscore slice operators) // OR // Slice the layout and return the sublayout (Coord has an Underscore slice op) template <class Coord> CUTE_HOST_DEVICE constexpr auto operator()(Coord const& coord) const { if constexpr (has_underscore<Coord>::value) { return slice(coord, *this); } else { return crd2idx(coord, shape(), stride()); } CUTE_GCC_UNREACHABLE; } // Convenience function for multi-dimensional coordinates template <class Coord0, class Coord1, class... Coords> CUTE_HOST_DEVICE constexpr auto operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) const { return operator()(make_coord(c0,c1,cs...)); } // // Compose // template <class OtherLayout> CUTE_HOST_DEVICE constexpr auto compose(OtherLayout const& other) const { return composition(*this, other); } template <class... Layouts> CUTE_HOST_DEVICE constexpr auto compose(Layouts const&... layouts) const { return composition(*this, make_tile(layouts...)); } template <class OtherShape> CUTE_HOST_DEVICE constexpr auto with_shape(OtherShape const& shape) const { return composition(*this, make_layout(shape)); } template <class... Shapes> CUTE_HOST_DEVICE constexpr auto with_shape(Shapes const&... shapes) const { return composition(*this, make_layout(make_shape(shapes...))); } // // Tile // template <class OtherLayout> CUTE_HOST_DEVICE constexpr auto tile(OtherLayout const& other) const { return tiled_divide(*this, other); } template <class... Layouts> CUTE_HOST_DEVICE constexpr auto tile(Layouts const&... 
layouts) const { return tiled_divide(*this, make_tile(layouts...)); } // // Utility // // // Index to Coordinate // // NOTE: Only valid for compact layouts // Return the (hierarchical) ND logical coordinate corresponding to the linear index // @post crd2idx(@a result, shape(), stride()) == idx // @post congruent(@a result, shape()) template <class IInt, __CUTE_REQUIRES(is_integral<IInt>::value)> CUTE_HOST_DEVICE constexpr auto get_hier_coord(IInt const& idx) const { return cute::idx2crd(idx, shape(), stride()); } // Return the (flat) ND logical coordinate corresponding to the linear index // @post crd2idx(@a result, shape(), stride()) == idx // @post rank(@a result) == rank(shape()) && depth(@a result) == 1 template <class IInt, __CUTE_REQUIRES(is_integral<IInt>::value)> CUTE_HOST_DEVICE constexpr auto get_flat_coord(IInt const& idx) const { return cute::crd2crd(this->get_hier_coord(idx), shape(), repeat<rank>(Int<1>{})); } // Return the generalized column-major 1D logical coordinate corresponding to the linear index // @post crd2idx(@a result, shape(), stride()) == idx // @post is_integral<decltype(@a result)>::value template <class IInt, __CUTE_REQUIRES(is_integral<IInt>::value)> CUTE_HOST_DEVICE constexpr auto get_1d_coord(IInt const& idx) const { return cute::crd2idx(this->get_hier_coord(idx), shape()); } // // Coordinate to Coordinate // #if 0 // Return the (hierarchical) ND logical coordinate corresponding to the linear index // @post congruent(@a result, shape()) template <class Coord> CUTE_HOST_DEVICE constexpr auto crd_2_hier_coord(Coord const& crd) const { return cute::crd2crd(crd, shape(), shape()); } // Return the (flat) ND logical coordinate corresponding to the linear index // @post rank(@a result) == rank(shape()) && depth(@a result) == 1 template <class Coord> CUTE_HOST_DEVICE constexpr auto crd_2_flat_coord(Coord const& crd) const { return cute::crd2crd(crd, shape(), product_each(shape())); } // Return the generalized column-major 1D logical coordinate corresponding to the linear index // @post is_integral<decltype(@a result)>::value template <class Coord> CUTE_HOST_DEVICE constexpr auto crd_2_1d_coord(Coord const& crd) const { //return cute::crd2crd(crd, shape(), product(shape())); return cute::crd2idx(crd, shape()); } #endif }; // Equality, return a static or dynamic boolean template <class ShapeA, class StrideA, class ShapeB, class StrideB> CUTE_HOST_DEVICE constexpr auto operator==(Layout<ShapeA,StrideA> const& layoutA, Layout<ShapeB,StrideB> const& layoutB) { return layoutA.shape() == layoutB.shape() && layoutA.stride() == layoutB.stride(); } template <class Layout> struct is_layout : false_type {}; template <class Shape, class Stride> struct is_layout<Layout<Shape,Stride>> : true_type {}; // // Layout construction // template <class Shape, class Stride, __CUTE_REQUIRES((is_tuple<Shape >::value || is_integral<Shape >::value) && (is_tuple<Stride>::value || is_integral<Stride>::value))> CUTE_HOST_DEVICE constexpr auto make_layout(Shape const& shape, Stride const& stride) { return Layout<Shape,Stride>(shape, stride); } template <class Shape, __CUTE_REQUIRES(is_tuple<Shape>::value || is_integral<Shape>::value)> CUTE_HOST_DEVICE constexpr auto make_layout(Shape const& shape) { return make_layout(shape, compact_col_major(shape)); } // Construct a layout from multiple layouts by // concatenating each layout as an independent mode template <class... Shapes, class... Strides> CUTE_HOST_DEVICE constexpr auto make_layout(Layout<Shapes,Strides> const&... 
layouts) { return make_layout(make_shape (layouts.shape()...), make_stride(layouts.stride()...)); } // // Convenience tags for common layouts // template <class Shape> CUTE_HOST_DEVICE constexpr auto make_layout(Shape const& shape, GenColMajor) { return make_layout(shape, compact_col_major(shape)); } template <class Shape> CUTE_HOST_DEVICE constexpr auto make_layout(Shape const& shape, GenRowMajor) { return make_layout(shape, compact_row_major(shape)); } // // Advanced Layout constructions // // Make a compact layout with shape @a shape and strides following the order induced by @a order. // Dynamic values in @a order are ignored, considered large, and considered ordered from left to right. // Example: // make_ordered_layout(Shape<_2,_2,_2,_2>{}, Step<_0,_2,_3,_1>{}) // -> (_2,_2,_2,_2):(_1,_4,_8,_2) // make_ordered_layout(make_shape(2,3,4,5), make_step(Int<2>{}, 67, 42, Int<50>{})) // -> (2,3,4,5):(_1,10,30,2) template <class Shape, class Order> CUTE_HOST_DEVICE constexpr auto make_ordered_layout(Shape const& shape, Order const& order) { return make_layout(shape, compact_order(shape, order)); } // Make a compact layout with the same shape as @a layout // and strides following the order induced by @a layout.stride(). // Static-0 strides in the input @a layout are preserved in the output. // Example: // make_layout_like(Layout<Shape<_2,_2,_2,_2>, Stride<_0,_2,_4,_1>>{}) // -> (_2,_2,_2,_2):(_0,_2,_4,_1) // make_layout_like(make_layout(make_shape(2,3,4,5), make_stride(Int<0>{},42,Int<1>{},Int<0>{}))) // -> (2,3,4,5):(_0,4,_1,_0) template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto make_layout_like(Layout<Shape,Stride> const& layout) { return make_layout(layout.shape(), compact_order(filter_zeros(layout.stride(), layout.shape()), layout.stride())); } // Make a compact layout with the same shape as @a layout // and strides following the order induced by @a layout.stride(), // except mode-0 is always stride-1 and generated column-major. // The 0th mode is commonly used for MMA_Atoms or Copy_Atoms so this // generates the 0th mode with LayoutLeft (preserving stride-0s) regardless of the reference layout template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto make_fragment_like(Layout<Shape,Stride> const& layout) { constexpr int R = Layout<Shape,Stride>::rank; if constexpr (R > 1 && is_static<Shape>::value) { return tiled_product(make_layout(get<0>(layout.shape()), compact_col_major(filter_zeros(get<0>(layout.stride()), get<0>(layout.shape())))), make_ordered_layout(take<1,R>(layout.shape()), take<1,R>(layout.stride()))); } else { return make_layout(layout.shape()); } CUTE_GCC_UNREACHABLE; } template <class Shape, __CUTE_REQUIRES(is_tuple<Shape>::value || is_integral<Shape>::value)> CUTE_HOST_DEVICE constexpr auto make_fragment_like(Shape const& shape) { return make_layout(shape); } // // Make an identity layout that maps a coordinate to itself // template <class Shape> CUTE_HOST_DEVICE constexpr auto make_identity_layout(Shape const& shape) { return make_layout(shape, make_basis_like(shape)); } // // Operations to manipulate Layouts like a tuple of pairs // // Return the Is...th sublayout. // For Is... = <I0,I1,...,IN>, equivalent to get<IN>(...get<I1>(get<I0>(layout))) template <size_t... 
Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto get(Layout<Shape,Stride> const& layout) { return make_layout(get<Is...>(layout.shape()), get<Is...>(layout.stride())); } // Return a new layout with only the modes in the range [B,E) template <int B, int E, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto take(Layout<Shape,Stride> const& layout) { static_assert(B < E, "take: empty range error"); static_assert(0 <= B && E <= Layout<Shape,Stride>::rank, "take: range out of bounds"); return make_layout(take<B,E>(layout.shape()), take<B,E>(layout.stride())); } // Return a new layout with only the modes Is... = <I0,I1,...,IN> template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto select(Layout<Shape,Stride> const& layout) { return make_layout(select<Is...>(layout.shape()), select<Is...>(layout.stride())); } // Return a layout with depth at most 1 template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto flatten(Layout<Shape,Stride> const& layout) { return make_layout(flatten(layout.shape()), flatten(layout.stride())); } // Return a layout whose profile is congruent to TargetProfile // @pre Input layout is flat, flatten(@a layout) == @a layout // @pre Input layout can be folded to profile, rank(@a layout) == rank(flatten(@a target_profile)) // @post congruent(@a result, @a target_profile) template <class Shape, class Stride, class TargetProfile> CUTE_HOST_DEVICE constexpr auto unflatten(Layout<Shape,Stride> const& layout, TargetProfile const& target_profile) { return make_layout(unflatten(layout.shape(), target_profile), unflatten(layout.stride(), target_profile)); } // // Utilities // // Return the sublayout of mode I... template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr decltype(auto) layout(Layout<Shape,Stride> const& layout) { if constexpr (sizeof...(Is) == 0) { return layout; } else { return get<Is...>(layout); } CUTE_GCC_UNREACHABLE; } // Return the shape of a mode template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr decltype(auto) shape(Layout<Shape,Stride>& layout) { return layout.template shape<Is...>(); } template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr decltype(auto) shape(Layout<Shape,Stride> const& layout) { return layout.template shape<Is...>(); } // Return the stride of a mode template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr decltype(auto) stride(Layout<Shape,Stride>& layout) { return layout.template stride<Is...>(); } template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr decltype(auto) stride(Layout<Shape,Stride> const& layout) { return layout.template stride<Is...>(); } // Return the number of elements in a mode template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto size(Layout<Shape,Stride> const& layout) { return size(shape<Is...>(layout)); } // Return the number of modes template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto rank(Layout<Shape,Stride> const& layout) { return rank(shape<Is...>(layout)); } // Return the depth of the layout template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto depth(Layout<Shape,Stride> const& layout) { return depth(shape<Is...>(layout)); } // Return the codomain shape of a mode // @post size(coshape(@a a)) == cosize(@a a) // @return C Coordinate with smallest elements such that // @a elem_less(sub_layout(c), C) for all c < size(@a sub_layout) // where sub_layout = get<Is...>(layout). 
template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto coshape(Layout<Shape,Stride> const& layout) { // Protect against negative strides auto abs_sub_layout = make_layout(shape<Is...>(layout), transform_leaf(stride<Is...>(layout), abs_fn{})); auto co_coord = as_arithmetic_tuple(abs_sub_layout(size(abs_sub_layout) - Int<1>{})); return co_coord + repeat_like(co_coord, Int<1>{}); } // Return the codomain size of a mode // @return M smallest integer such that // @a sub_layout(c) < M for all c < size(@a sub_layout) // where sub_layout = get<Is...>(layout). template <int... Is, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto cosize(Layout<Shape,Stride> const& layout) { return size(coshape<Is...>(layout)); } template <class Layout> using cosize_t = decltype(cosize(declval<Layout>())); template <class Layout> static constexpr int cosize_v = cosize_t<Layout>::value; // With crd2idx(coord, shape), makes sense to have crd2idx(coord, Layout) as well template <class Coord, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto crd2idx(Coord const& c, Layout<Shape,Stride> const& layout) { return crd2idx(c, layout.shape(), layout.stride()); } // // Slice and Dice a layout // template <class Coord, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto slice(Coord const& c, Layout<Shape,Stride> const& layout) { return make_layout(slice(c, layout.shape()), slice(c, layout.stride())); } template <class Coord, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto slice_and_offset(Coord const& c, Layout<Shape,Stride> const& layout) { return cute::make_tuple(slice(c, layout), crd2idx(c, layout)); } template <class Coord, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto dice(Coord const& c, Layout<Shape,Stride> const& layout) { return make_layout(dice(c, layout.shape()), dice(c, layout.stride())); } // Compute a pointer offset and (potentially modified) layout from a coordinate // This exists so it can be overloaded for ComposedLayout template <class Coord, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto domain_offset(Coord const& coord, Layout<Shape,Stride> const& layout) { return cute::make_tuple(layout, layout(coord)); } // // Transform the modes of a layout // namespace detail { template <class Tuple, class F, int... I> CUTE_HOST_DEVICE constexpr auto transform_layout(Tuple const& t, F&& f, seq<I...>) { return make_layout(f(get<I>(t))...); } template <class Tuple0, class Tuple1, class F, int... I, int... I0, int... I1> CUTE_HOST_DEVICE constexpr auto transform_layout(Tuple0 const& t0, Tuple1 const& t1, F&& f, seq<I...>, seq<I0...>, seq<I1...>) { return make_layout(f(get<I>(t0),get<I>(t1))..., get<I0>(t0)..., get<I1>(t1)...); } } // end namespace detail template <class Tuple, class F> CUTE_HOST_DEVICE constexpr auto transform_layout(Tuple const& t, F&& f) { return detail::transform_layout(t, f, make_seq<decltype(rank(t))::value>{}); } template <class Tuple0, class Tuple1, class F> CUTE_HOST_DEVICE constexpr auto transform_layout(Tuple0 const& t0, Tuple1 const& t1, F&& f) { constexpr int R0 = decltype(rank(t0))::value; constexpr int R1 = decltype(rank(t1))::value; constexpr int R = (R0 < R1) ? 
R0 : R1; return detail::transform_layout(t0, t1, f, make_seq<R>{}, make_range<R,R0>{}, make_range<R,R1>{}); } // // Coalesce and Filter // namespace detail { // Look at each element and the front of the stack (in order of priority) // front(NewLayout) get<I>(Layout) // s0:d0 _1:d1 => continue // _1:d0 s1:d1 => replace_front s1:d1 // s0:s1*d1 s1:d1 => replace_front s0*s1:d1 // s0:d0 s1:d1 => prepend s1:d1 // // @pre OldShape and OldStride are flat template <int I, class OldShape, class OldStride, class NewShape, class NewStride> CUTE_HOST_DEVICE constexpr auto bw_coalesce(OldShape const& old_shape, OldStride const& old_stride, NewShape const& new_shape, NewStride const& new_stride) { if constexpr (I == -1) { // Base case, we're done if constexpr (is_constant<1, NewShape>::value) { return Layout<_1,_0>{}; } else { return Layout<NewShape,NewStride>{new_shape,new_stride}; } } else if constexpr (is_constant<1, decltype(get<I>(old_shape))>::value) { // shape<I>(layout) == _1, skip it and continue return bw_coalesce<I-1>(old_shape, old_stride, new_shape, new_stride); } else if constexpr (is_constant<1, NewShape>::value) { // Replace our shape-1 with anything (Can only happen on input new_shape/new_stride) return bw_coalesce<I-1>(old_shape, old_stride, get<I>(old_shape), get<I>(old_stride)); } else if constexpr (is_static<decltype(get<0>(new_shape))>::value && is_constant<true, decltype(get<I>(old_shape) * get<I>(old_stride) == get<0>(new_stride))>::value) { // Merge modes because the shapes and strides match return bw_coalesce<I-1>(old_shape, old_stride, replace_front(new_shape, get<I>(old_shape) * get<0>(new_shape)), replace_front(new_stride, get<I>(old_stride))); } else { // Can't replace or merge, so prepend a new mode return bw_coalesce<I-1>(old_shape, old_stride, prepend(new_shape, get<I>(old_shape)), prepend(new_stride, get<I>(old_stride))); } CUTE_GCC_UNREACHABLE; } // cute::coalesce promises to not change the Layout as a function from integers to codomain. // It accomplishes this inside of the Layout's domain, but not always outside of the domain. // Example: (_4,_1):(_1,_0) coalesces to _4:_1. // detail::coalesce_x preserves the Layout function inside its domain and outside. 
// // @post depth(@a result) <= 1 // @post for all i, 0 <= i, @a layout(i) == @a result(i) template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto coalesce_x(Layout<Shape,Stride> const& layout) { auto flat_shape = flatten(layout.shape()); auto flat_stride = flatten(layout.stride()); constexpr int R = decltype(rank(flat_shape))::value; if constexpr (is_constant<1, decltype(get<R-1>(flat_shape))>::value) { return detail::bw_coalesce<R-2>(flat_shape, flat_stride, Int<2>{}, get<R-1>(flat_stride)); } else { return detail::bw_coalesce<R-2>(flat_shape, flat_stride, get<R-1>(flat_shape), get<R-1>(flat_stride)); } } // Apply coalesce_x at the terminals of trg_profile template <class Shape, class Stride, class IntTuple> CUTE_HOST_DEVICE constexpr auto coalesce_x(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile) { if constexpr (is_tuple<IntTuple>::value) { static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank); return cute::transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return coalesce_x(l,t); }); } else { return coalesce_x(layout); } CUTE_GCC_UNREACHABLE; } } // end namespace detail // "Simplify" the layout by combining modes that are possible to combine // Does not respect the shape of the layout, but does preserve total size // @post size(@a result) == size(@a layout) // @post depth(@a result) <= 1 // @post for all i, 0 <= i < size(@a layout), @a layout(i) == @a result(i) template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto coalesce(Layout<Shape,Stride> const& layout) { auto flat_shape = flatten(layout.shape()); auto flat_stride = flatten(layout.stride()); constexpr int R = decltype(rank(flat_shape))::value; return detail::bw_coalesce<R-2>(flat_shape, flat_stride, get<R-1>(flat_shape), get<R-1>(flat_stride)); } // Apply coalesce at the terminals of trg_profile template <class Shape, class Stride, class IntTuple> CUTE_HOST_DEVICE constexpr auto coalesce(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile) { if constexpr (is_tuple<IntTuple>::value) { static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank); return transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return coalesce(l,t); }); } else { return coalesce(layout); } CUTE_GCC_UNREACHABLE; } // Combine static and dynamic modes of a shape. 
// @post size(@a result) == size(@a shape) // @post depth(@a result) <= 1 template <class Shape> CUTE_HOST_DEVICE constexpr auto coalesce(Shape const& shape) { static_assert(is_integral<Shape>::value || is_tuple<Shape>::value); return cute::fold_first(flatten(shape), [](auto const& init, auto const& a) { if constexpr (is_static<decltype(back(init))>::value == is_static<decltype(a)>::value) { return replace_back(init, back(init) * a); // Both static or both dynamic, coalesce and replace } else { return append(init, a); // Can't coalesce, so append } }); } // Replace the modes in layout that have a 0-stride with a 1-size template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto filter_zeros(Layout<Shape,Stride> const& layout) { return make_layout(filter_zeros(layout.stride(), layout.shape()), layout.stride()); } // Remove all of the 0-strides and 1-sizes // Return 1-shape if empty template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto filter(Layout<Shape,Stride> const& layout) { return coalesce(filter_zeros(layout)); } // Apply filter at the terminals of trg_profile template <class Shape, class Stride, class IntTuple> CUTE_HOST_DEVICE constexpr auto filter(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile) { if constexpr (is_tuple<IntTuple>::value) { static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank); return transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return filter(l,t); }); } else { return filter(layout); } CUTE_GCC_UNREACHABLE; } // // Append, Prepend, Replace // template <int N, class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0> CUTE_HOST_DEVICE constexpr auto append(Layout<ShapeA,StrideA> const& layout, Layout<ShapeX,StrideX> const& x = {}) { return make_layout(append<N>(layout.shape(), x.shape()), append<N>(layout.stride(), x.stride())); } template <class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0> CUTE_HOST_DEVICE constexpr auto append(Layout<ShapeA,StrideA> const& layout, Layout<ShapeX,StrideX> const& x = {}) { return make_layout(append(layout.shape(), x.shape()), append(layout.stride(), x.stride())); } template <int N, class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0> CUTE_HOST_DEVICE constexpr auto prepend(Layout<ShapeA,StrideA> const& layout, Layout<ShapeX,StrideX> const& x = {}) { return make_layout(prepend<N>(layout.shape(), x.shape()), prepend<N>(layout.stride(), x.stride())); } template <class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0> CUTE_HOST_DEVICE constexpr auto prepend(Layout<ShapeA,StrideA> const& layout, Layout<ShapeX,StrideX> const& x = {}) { return make_layout(prepend(layout.shape(), x.shape()), prepend(layout.stride(), x.stride())); } template <int N, class ShapeA, class StrideA, class ShapeX, class StrideX> CUTE_HOST_DEVICE constexpr auto replace(Layout<ShapeA,StrideA> const& layout, Layout<ShapeX,StrideX> const& x) { return make_layout(replace<N>(layout.shape(), x.shape()), replace<N>(layout.stride(), x.stride())); } template <int B, int E, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto group(Layout<Shape,Stride> const& layout) { return make_layout(group<B,E>(layout.shape()), group<B,E>(layout.stride())); } // // Composition of two layouts: lhs o rhs // @post compatible(rhs, result) // @post result(c) = lhs(rhs(c)) // for all c in the domain of rhs // namespace detail { template <class LShape, class LStride, class RShape, class RStride> CUTE_HOST_DEVICE constexpr auto composition_impl(LShape 
const& lhs_shape, LStride const& lhs_stride, RShape const& rhs_shape, RStride const& rhs_stride) { if constexpr (is_tuple<RShape>::value) { // Apply the right-distributivity of Layout composition return transform_layout(rhs_shape, rhs_stride, [&](auto const& s, auto const& d) { return composition_impl(lhs_shape, lhs_stride, s, d); }); } else if constexpr (is_scaled_basis<RStride>::value) { // Special case for a ScaledBasis stride return composition_impl(basis_get(rhs_stride, lhs_shape), basis_get(rhs_stride, lhs_stride), rhs_shape, basis_value(rhs_stride)); } else if constexpr (is_constant<0, RStride>::value) { // Special case shortcut for any static stride-0 return Layout<RShape, RStride>{rhs_shape, rhs_stride}; } else if constexpr (is_integral<decltype(lhs_shape)>::value) { // Special case shortcut for any integral LShape return Layout{rhs_shape, rhs_stride * lhs_stride}; } else if constexpr (is_constant<1, RStride>::value) { // Special case shortcut for any static stride-1 constexpr int R = rank_v<LShape>; auto result_shape_0 = take<0,R-1>(lhs_shape); // Mod out the rhs_shape from the lhs_shape auto const [result_shape_1, rest_shape] = fold(result_shape_0, cute::make_tuple(cute::make_tuple(), rhs_shape), [] (auto const& init, auto const& si) { return cute::make_tuple(append(get<0>(init), shape_min(abs(si), get<1>(init))), shape_div(get<1>(init), abs(si))); }); // Jump into coalesce and append (rest_shape, get<R-1>(lhs_stride)) return detail::bw_coalesce<R-2>(result_shape_1, lhs_stride, rest_shape, get<R-1>(lhs_stride)); } else { // General case: integral RShape and RStride, tuple LShape and LStride constexpr int R = rank_v<LShape>; auto result_shape_0 = take<0,R-1>(lhs_shape); auto result_stride_0 = take<0,R-1>(lhs_stride); // Divide out the rhs_stride from the lhs_shape auto const [result_shape_1, rest_stride] = fold(result_shape_0, cute::make_tuple(cute::make_tuple(), rhs_stride), [] (auto const& init, auto const& di) { return cute::make_tuple(append(get<0>(init), shape_div(di, get<1>(init))), shape_div(get<1>(init), di)); }); // Apply any lhs_shape changes to the stride auto result_stride_1 = elem_scale(result_stride_0, shape_div(result_shape_0, result_shape_1)); // Mod out the rhs_shape from the lhs_shape auto const [result_shape_2, rest_shape] = fold(result_shape_1, cute::make_tuple(cute::make_tuple(), rhs_shape), [] (auto const& init, auto const& si) { return cute::make_tuple(append(get<0>(init), shape_min(abs(si), get<1>(init))), shape_div(get<1>(init), abs(si))); }); // Jump into coalesce and append (rest_shape, rest_stride * get<R-1>(lhs_stride)) return detail::bw_coalesce<R-2>(result_shape_2, result_stride_1, rest_shape, rest_stride * get<R-1>(lhs_stride)); } CUTE_GCC_UNREACHABLE; } } // end namespace detail template <class LShape, class LStride, class RShape, class RStride> CUTE_HOST_DEVICE constexpr auto composition(Layout<LShape,LStride> const& lhs, Layout<RShape,RStride> const& rhs) { auto coprofile = repeat_like(decltype(coshape(rhs)){}, Int<0>{}); auto flat_lhs = detail::coalesce_x(lhs, coprofile); return detail::composition_impl(flat_lhs.shape(), flat_lhs.stride(), rhs.shape(), rhs.stride()); } template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto composition(Layout<LShape,LStride> const& lhs, Tiler const& rhs) { if constexpr (is_tuple<Tiler>::value) { static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank); // Drop any modes of lhs that aren't hit by rhs return detail::transform_layout(lhs, rhs, [](auto const& l, auto 
const& r) { return composition(l,r); }, make_seq<tuple_size<Tiler>::value>{}, seq<>{}, seq<>{}); } else if constexpr (is_underscore<Tiler>::value) { return lhs; } else if constexpr (is_integral<Tiler>::value) { auto flat_lhs = detail::coalesce_x(lhs); return detail::composition_impl(flat_lhs.shape(), flat_lhs.stride(), rhs, Int<1>{}); } CUTE_GCC_UNREACHABLE; } // // Complement // // Build the complement of a layout. // @post size(@a result) >= @a cosize_hi / size(filter(@a layout))); // @post For all i in [1,size(@a result)), // @a result(i) < @a result(i-1) // For all j in [0, size(@a layout)), // @a result(i) != @a layout(j) // namespace detail { // @pre @a layout has been filtered (flattened and no stride-0 or size-1 modes). template <class Shape, class Stride, class CoTarget> CUTE_HOST_DEVICE constexpr auto complement(Shape const& shape, Stride const& stride, CoTarget const& cotarget) { if constexpr (is_constant<0, Stride>::value) { // Special case for irreducible rank-1 stride-0 layout return make_layout(coalesce(cotarget)); } else { // General case constexpr int R = rank_v<Shape>; static_assert(R == 1 || is_static<Stride>::value, "Dynamic-stride complement only for rank-1 layouts"); // Should just be a sort and a fold... // Then we could even handle dynamic strides (but they would destroy all static strides) auto [shape_, stride_, result_shape_, result_stride] = fold(make_seq<R-1>{}, cute::make_tuple(shape, stride, cute::make_tuple(), cute::make_tuple(Int<1>{})), [](auto const& init, auto i) { auto [shape, stride, result_shape, result_stride] = init; auto min_stride = cute::min(stride); auto min_idx = cute::find(stride, min_stride); auto new_shape = min_stride / get<i>(result_stride); auto new_stride = min_stride * get<min_idx>(shape); static_assert(not is_constant<0, decltype(new_shape)>::value, "Non-injective Layout detected in complement."); return cute::make_tuple(remove<min_idx>(shape), // Remove the min_idx from shape remove<min_idx>(stride), // Remove the min_idx from stride append(result_shape , new_shape ), // new shape = min_stride / last_stride append(result_stride, new_stride)); // new stride = min_stride * curr_shape }); // Append the last shape mode auto new_shape = get<0>(stride_) / get<R-1>(result_stride); // new shape = min_stride / last_stride static_assert(not is_constant<0, decltype(new_shape)>::value, "Non-injective Layout detected in complement."); auto result_shape = append(result_shape_, new_shape); // Compute the rest_shape and rest_stride auto new_stride = get<0>(stride_) * get<0>(shape_); // new stride = min_stride * curr_shape auto rest_shape = coalesce(ceil_div(cotarget, new_stride)); auto rest_stride = compact_col_major(rest_shape, new_stride); // Coalesce and append (rest_shape, rest_stride) return coalesce(make_layout(make_shape (result_shape , rest_shape ), make_stride(result_stride, rest_stride))); } CUTE_GCC_UNREACHABLE; } } // end namespace detail template <class Shape, class Stride, class CoTarget> CUTE_HOST_DEVICE constexpr auto complement(Layout<Shape,Stride> const& layout, CoTarget const& cotarget) { auto filter_layout = filter(layout); return detail::complement(filter_layout.shape(), filter_layout.stride(), shape(cotarget)); } template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto complement(Layout<Shape,Stride> const& layout) { auto filter_layout = filter(layout); return detail::complement(filter_layout.shape(), filter_layout.stride(), cosize(filter_layout)); } // // Right-Inverse and Left-Inverse // namespace detail { template 
<int NextStride, class Shape, class Stride, int... Is> CUTE_HOST_DEVICE constexpr auto inverse_seq(Shape const& shape, Stride const& stride, seq<Is...>) { auto next_I = cute::find_if(stride, [](auto a) { return is_constant<NextStride, decltype(a)>{}; }); if constexpr (next_I == decltype(rank(stride))::value) { // If not found, return current seq return seq<Is...>{}; } else { // auto next_stride = get<next_I>(shape) * get<next_I>(stride); // NOTE: Needed for g++-7 using next_stride = decltype(get<next_I>(shape) * get<next_I>(stride)); if constexpr (is_static<next_stride>::value && !is_constant<NextStride, next_stride>::value) { // If next_stride is static and unique, then continue return inverse_seq<next_stride::value>(shape, stride, seq<Is..., next_I>{}); } else { // Else return current seq + next_I return seq<Is..., next_I>{}; } } CUTE_GCC_UNREACHABLE; } } // end namespace detail // // Build the right-inverse of a layout // @pre is_static<Layout> // @result A layout @a result such that // @a layout(@a result(i)) == i for all i < size(@a result) // @result A layout @a result such that // composition(@a layout, @a result) is identical to make_layout(shape(result)) // template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto right_inverse(Layout<Shape,Stride> const& layout) { auto flat_layout = coalesce(layout); auto astride = transform_leaf(flat_layout.stride(), abs_fn{}); // Find Int<1>{}, the starting stride, and follow the strides to gen inverse_seq [[maybe_unused]] auto iseq = detail::inverse_seq<1>(flat_layout.shape(), astride, seq<>{}); if constexpr (iseq.size() == 0) { return Layout<_1,_0>{}; // Empty case, nothing found } else { // Generate the corresponding new strides and construct auto rstride = compact_col_major(flat_layout.shape()); return make_layout(unwrap(transform(iseq, [&](auto i) { return shape<i>(flat_layout); })), unwrap(transform(iseq, [&](auto i) { return signum(stride<i>(flat_layout)) * get<i>(rstride); }))); } CUTE_GCC_UNREACHABLE; } CUTE_HOST_DEVICE constexpr auto right_inverse(Underscore const& _) { return _; } // // Build the left-inverse of a layout // @pre is_static<Layout> // @pre @a layout is an injective function // @result A layout @a result such that // @a result(@a layout(i)) == i for all i < size(@a layout) // @result A layout @a result such that // composition(@a result, @a layout) is identical to make_layout(shape(layout)) // template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto left_inverse(Layout<Shape,Stride> const& layout) { return right_inverse(make_layout(layout, complement(layout))); } CUTE_HOST_DEVICE constexpr auto left_inverse(Underscore const& _) { return _; } // // Max Common Layout // /* Return a layout that points to the maximum number of contiguous elements * that logically correspond in the layouts of @a a and @a b. 
* * @returns Layout R * @post For all 0 <= i < size(R), a(R(i)) == i and b(R(i)) == i */ template <class ShapeA, class StrideA, class ShapeB, class StrideB> CUTE_HOST_DEVICE constexpr auto max_common_layout(Layout<ShapeA,StrideA> const& a, Layout<ShapeB,StrideB> const& b) { Layout inv_b = right_inverse(b); Layout common = coalesce(composition(a, inv_b)); // Keep only the static identity component of the common layout if constexpr (is_static<decltype(shape<0>(common))>::value && is_constant<1, decltype(stride<0>(common))>::value) { // Truncate to the size of the contiguous vector (static stride-1 mode) return composition(inv_b, layout<0>(common)); } else { return Layout<_1,_0>{}; } } /* Return Int<N> such that N is the maximum number of contiguous elements * that logically correspond in the layouts of @a a and @a b. * * @returns Int<N> with N >= 1 * @post For all 0 <= n < N, a(b.get_1d_coord(n)) == n * (NOTE: Problems with negative strides/coords in this post-condition) */ template <class ShapeA, class StrideA, class ShapeB, class StrideB> CUTE_HOST_DEVICE constexpr auto max_common_vector(Layout<ShapeA,StrideA> const& a, Layout<ShapeB,StrideB> const& b) { Layout common = coalesce(composition(a, right_inverse(b))); // Keep only the static identity component of the common layout if constexpr (is_static<decltype(shape<0>(common))>::value && is_constant<1, decltype(stride<0>(common))>::value) { // Truncate to the size of the contiguous vector (static stride-1 mode) return shape<0>(common); } else { return Int<1>{}; } CUTE_GCC_UNREACHABLE; } // // Kernel (Nullspace) of a Layout // namespace detail { template <int NextI, class Stride, int... Is> CUTE_HOST_DEVICE constexpr auto nullspace_seq(Stride const& stride, seq<Is...>) { if constexpr (NextI == rank_v<Stride>) { return seq<Is...>{}; } else if constexpr (is_constant<0, decltype(get<NextI>(stride))>::value) { return detail::nullspace_seq<NextI+1>(stride, seq<Is..., NextI>{}); } else { return detail::nullspace_seq<NextI+1>(stride, seq<Is...>{}); } CUTE_GCC_UNREACHABLE; } } // end namespace detail // // Build the nullspace of a layout // @result A layout @a result such that // size(@a result) == size(@a layout) / size(filter(@a layout)) // @a layout(@a result(i)) == 0 for all i < size(@a result) // template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto nullspace(Layout<Shape,Stride> const& layout) { auto flat_layout = flatten(layout); auto iseq = detail::nullspace_seq<0>(flat_layout.stride(), seq<>{}); if constexpr (iseq.size() == 0) { return Layout<_1,_0>{}; // Empty case, nothing found } else { // Generate the corresponding new strides and construct auto rstride = compact_col_major(flat_layout.shape()); return make_layout(unwrap(transform(iseq, [&](auto i) { return shape<i>(flat_layout); })), unwrap(transform(iseq, [&](auto i) { return get<i>(rstride); }))); } CUTE_GCC_UNREACHABLE; } // // Zip // template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto zip(Layout<Shape,Stride> const& layout) { return make_layout(zip(layout.shape()), zip(layout.stride())); } template <class TShape, class TStride, class UShape, class UStride> CUTE_HOST_DEVICE constexpr auto zip(Layout<TShape,TStride> const& layoutA, Layout<UShape,UStride> const& layoutB) { return make_layout(zip(layoutA.shape(), layoutB.shape()), zip(layoutA.stride(), layoutB.stride())); } // // Tile unzip // Logical product and logical divide (on layouts) produce rank-2 results by design. 
// Follow the profile of @a tile and zip the rank-2 modes located at the terminals into // their own mode. // template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto tile_unzip(Layout<LShape,LStride> const& layout, Tiler const& tiler) { return make_layout(zip2_by(layout.shape(), tiler), zip2_by(layout.stride(), tiler)); } // // Logical divide // template <class LShape, class LStride, class TShape, class TStride> CUTE_HOST_DEVICE constexpr auto logical_divide(Layout<LShape,LStride> const& layout, Layout<TShape,TStride> const& tiler) { return composition(layout, make_layout(tiler, complement(tiler, shape(layout)))); } template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto logical_divide(Layout<LShape,LStride> const& layout, Tiler const& tiler) { if constexpr (is_tuple<Tiler>::value) { static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank, "logical_divide: Too many modes in tiler."); return transform_layout(layout, tiler, [](auto const& l, auto const& t) { return logical_divide(l,t); }); } else if constexpr (is_underscore<Tiler>::value) { return layout; } else if constexpr (is_integral<Tiler>::value) { return logical_divide(layout, make_layout(tiler)); } CUTE_GCC_UNREACHABLE; } // Generalization of ceil_div for Layout lhs // is effectively the "rest mode" of logical_divide. // Occurs in the calculation of gridDim, for example, for generalized tilers // Example: // dim3 gridDim(size(ceil_div(problem_shape_M, cta_tiler_M)), // size(ceil_div(problem_shape_N, cta_tiler_N))); // This does not consider compositional acceptance, so it may be the case that // ceil_div produces a result while logical_divide (and friends) do not. template <class Target, class TShape, class TStride> CUTE_HOST_DEVICE constexpr auto ceil_div(Target const& target, Layout<TShape,TStride> const& tiler) { return complement(tiler, size(target)); } // // Convenience operator // that produces layouts like ((BLK_A,BLK_B,...),(a,b,...,x,y)) // by gathering the tile modes and residuals into a rank-2 result. 
// template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto zipped_divide(Layout<LShape,LStride> const& layout, Tiler const& tiler) { return tile_unzip(logical_divide(layout, tiler), tiler); } // Same as zipped_divide, but unpacks the second mode: ((BLK_A,BLK_B,...),a,b,...,x,y) template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto tiled_divide(Layout<LShape,LStride> const& layout, Tiler const& tiler) { auto result = zipped_divide(layout, tiler); auto R1 = rank<1>(result); return result(_, repeat<R1>(_)); } // Same as zipped_divide, but unpacks both modes: (BLK_A,BLK_B,...,a,b,...,x,y) template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto flat_divide(Layout<LShape,LStride> const& layout, Tiler const& tiler) { auto result = zipped_divide(layout, tiler); auto R0 = rank<0>(result); auto R1 = rank<1>(result); return result(repeat<R0>(_), repeat<R1>(_)); } // // Logical product // template <class LShape, class LStride, class TShape, class TStride> CUTE_HOST_DEVICE constexpr auto logical_product(Layout<LShape,LStride> const& block, Layout<TShape,TStride> const& tiler) { return make_layout(block, composition(complement(block, size(block)*cosize(tiler)), tiler)); } template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto logical_product(Layout<LShape,LStride> const& block, Tiler const& tiler) { if constexpr (is_tuple<Tiler>::value) { static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank, "logical_product: Too many modes in tiler."); return transform_layout(block, tiler, [](auto const& l, auto const& t) { return logical_product(l,t); }); } else if constexpr (is_underscore<Tiler>::value) { return block; } else if constexpr (is_integral<Tiler>::value) { return logical_product(block, make_layout(tiler)); } CUTE_GCC_UNREACHABLE; } // // Convenience operator // that produces layouts like ((BLK_A,BLK_B,...),(a,b,...,x,y)) // by gathering the block modes and products into a rank-2 result. // template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto zipped_product(Layout<LShape,LStride> const& block, Tiler const& tiler) { return tile_unzip(logical_product(block, tiler), tiler); } // Same as zipped_product, but unpacks the second mode: ((BLK_A,BLK_B,...),a,b,...,x,y) template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto tiled_product(Layout<LShape,LStride> const& block, Tiler const& tiler) { auto result = zipped_product(block, tiler); auto R1 = rank<1>(result); return result(_, repeat<R1>(_)); } // Same as zipped_product, but unpacks both modes: (BLK_A,BLK_B,...,a,b,...,x,y) template <class LShape, class LStride, class Tiler> CUTE_HOST_DEVICE constexpr auto flat_product(Layout<LShape,LStride> const& block, Tiler const& tiler) { auto result = zipped_product(block, tiler); auto R0 = rank<0>(result); auto R1 = rank<1>(result); return result(repeat<R0>(_), repeat<R1>(_)); } // // Rank-sensitive products // // blocked_product -- Reproduce a block over a tiler. // Think of every element of "tiler" as a "block" // and return the layout of the resulting structure. 
// @post rank(@a result) == cute::max(rank(@a block), rank(@a tiler)) template <class TShape, class TStride, class UShape, class UStride> CUTE_HOST_DEVICE constexpr auto blocked_product(Layout<TShape,TStride> const& block, Layout<UShape,UStride> const& tiler) { constexpr int R = cute::max(rank_v<TShape>, rank_v<UShape>); auto result = logical_product(append<R>(block), append<R>(tiler)); return coalesce(zip(get<0>(result), get<1>(result)), tuple_repeat<R>(Int<1>{})); } // raked_product -- Reproduce a block over a tiler with block-interleaving. // Think of every element of "tiler" as a "block", interleave those blocks, // and return the layout of the resulting structure. // @post rank(@a result) == cute::max(rank(@a block), rank(@a tiler)) template <class TShape, class TStride, class UShape, class UStride> CUTE_HOST_DEVICE constexpr auto raked_product(Layout<TShape,TStride> const& block, Layout<UShape,UStride> const& tiler) { constexpr int R = cute::max(rank_v<TShape>, rank_v<UShape>); auto result = logical_product(append<R>(block), append<R>(tiler)); return coalesce(zip(get<1>(result), get<0>(result)), tuple_repeat<R>(Int<1>{})); } // tile_to_shape -- Perform a product of a layout so that the result matches a target shape. // This is similar to blocked_product, but specifies the result shape instead of the // product shape, which is more convenient in certain circumstances. // @param block The layout to repeat // @param trg_shape The target shape of the result // @param ord_shape The order of the modes of @a trg_shape to tile @a layout with. // Defaults to GenColMajor, so @a layout will repeat // across the first mode first, the second mode second, etc // E.g. Step<_2,_1,_3> will cause @a layout to repeat // across the second mode first, the first mode second, and the third mode last. // @pre rank(@a block) <= rank(@a trg_shape) // @post compatible(@a trg_shape, shape(@a result)) template <class Shape, class Stride, class TrgShape, class ModeOrder = LayoutLeft> CUTE_HOST_DEVICE constexpr auto tile_to_shape(Layout<Shape,Stride> const& block, TrgShape const& trg_shape, ModeOrder const& ord_shape = {}) { CUTE_STATIC_ASSERT_V(rank(block) <= rank(trg_shape), "Rank of layout must be <= rank of target shape."); constexpr int R = rank_v<TrgShape>; auto padded_block = append<R>(block); auto block_shape = product_each(shape(padded_block)); auto target_shape = product_each(shape(trg_shape)); // Assert proper division if constexpr (is_static<decltype(target_shape)>::value) { CUTE_STATIC_ASSERT_V(weakly_compatible(block_shape, target_shape), "tile_to_shape: block shape does not divide the target shape."); } auto product_shape = ceil_div(target_shape, block_shape); return coalesce(blocked_product(padded_block, make_ordered_layout(product_shape, ord_shape)), product_shape); } // // Upcast // For stride-1 mode, divide size by N. Divide all other strides by N. 
// template <int N, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto upcast(Shape const& shape, Stride const& stride) { if constexpr (is_tuple<Shape>::value) { // tuple stride return transform_layout(shape, stride, [](auto const& s, auto const& d) { return upcast<N>(s,d); }); } else if constexpr (is_constant<0, Stride>::value) { // static-0 stride return Layout<Shape,Stride>{shape,stride}; } else if constexpr (is_static<Stride>::value) { // static stride return make_layout(shape_div(shape, shape_div(Int<N>{}, abs(stride))), shape_div(stride, Int<N>{})); } else { // dynamic stride // assume dynamic strides are larger than N and divisible // assert(stride % N == 0); return make_layout(shape, safe_div(stride, Int<N>{})); } CUTE_GCC_UNREACHABLE; } template <int N, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto upcast(Layout<Shape,Stride> const& layout) { return upcast<N>(layout.shape(), layout.stride()); } // // Downcast // For stride-1 mode, multiply size by N. Multiply all other strides by N. // template <int N, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto downcast(Shape const& shape, Stride const& stride) { if constexpr (is_tuple<Shape>::value) { return transform_layout(shape, stride, [](auto const& s, auto const& d) { return downcast<N>(s,d); }); } else if constexpr (is_constant<1, Stride>::value || is_constant<-1, Stride>::value) { return make_layout(shape * Int<N>{}, stride); } else { return make_layout(shape, stride * Int<N>{}); } CUTE_GCC_UNREACHABLE; } template <int N, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto downcast(Layout<Shape,Stride> const& layout) { CUTE_STATIC_ASSERT(has_int1<Stride>::value, "Downcast requires adjacent elements"); return downcast<N>(layout.shape(), layout.stride()); } // // Recast // template <class OldType, class NewType, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto recast_layout(Layout<Shape,Stride> const& layout) { using scale = decltype(trait_ratio(sizeof_bits<NewType>{}, sizeof_bits<OldType>{})); if constexpr (scale::num == 1 && scale::den == 1) { return layout; } else if constexpr (scale::num == 1) { return downcast<scale::den>(layout); } else if constexpr (scale::den == 1) { return upcast<scale::num>(layout); } else { static_assert(dependent_false<scale>, "Recast not supported."); } CUTE_GCC_UNREACHABLE; } // // Display utilities // template <class Shape, class Stride> CUTE_HOST_DEVICE void print(Layout<Shape,Stride> const& layout) { print(layout.shape()); print(":"); print(layout.stride()); } #if !defined(__CUDACC_RTC__) template <class Shape, class Stride> CUTE_HOST std::ostream& operator<<(std::ostream& os, Layout<Shape,Stride> const& layout) { return os << shape(layout) << ":" << stride(layout); } #endif // Generic 2D Layout to console table template <class Layout> CUTE_HOST_DEVICE void print_layout(Layout const& layout) // (m,n) -> idx { CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{}); int idx_width = num_digits(cosize(layout)) + 2; const char* delim = "+-----------------------"; print(layout); print("\n"); // Column indices print(" "); for (int n = 0; n < size<1>(layout); ++n) { printf(" %*d ", idx_width-2, n); } printf("\n"); // Print out A m-by-n for (int m = 0; m < size<0>(layout); ++m) { // Header print(" "); for (int n = 0; n < size<1>(layout); ++n) { printf("%.*s", idx_width+1, delim); } printf("+\n"); // Values printf("%2d ", m); // Row indices for (int n = 0; n < size<1>(layout); ++n) { printf("| %*d ", idx_width-2, int(layout(m,n))); } printf("|\n"); } // Footer print(" "); 
for (int n = 0; n < size<1>(layout); ++n) { printf("%.*s", idx_width+1, delim); } printf("+\n"); } // Generic ThrVal 2D Layout to console table template <class Layout, class ThrID> CUTE_HOST_DEVICE void print_layout(Layout const& layout, ThrID const& thrid) // (m,n) -> (tid,vid) and tid -> thr_idx { CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{}); print(layout); print("\n"); print(thrid); print("\n"); // Print out m-by-n for (int m = 0; m < size<0>(layout); ++m) { // Header for (int n = 0; n < size<1>(layout); ++n) printf("+------"); printf("+\n"); // Values for (int n = 0; n < size<1>(layout); ++n) printf("|%03d-%02d", int(thrid(layout(m,n) % size(thrid))), int(layout(m,n) / size(thrid))); printf("|\n"); } // Footer for (int n = 0; n < size<1>(layout); ++n) printf("+------"); printf("+\n"); } // Generic 2D Layout to Latex printer -- B&W 8-value color coding template <class LayoutA> CUTE_HOST_DEVICE void print_latex(LayoutA const& layout_a) { CUTE_STATIC_ASSERT_V(rank(layout_a) <= Int<2>{}); auto layout = append<2>(layout_a, Layout<_1,_0>{}); char const* latex_header = "\\documentclass[convert]{standalone}\n" "\\usepackage{tikz}\n\n" "\\begin{document}\n" "\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center,font=\\Large}]\n\n"; char const* latex_footer = "\\end{tikzpicture}\n" "\\end{document}\n"; char const* color_map[8] = {"black!00", "black!40", "black!20", "black!60", "black!10", "black!50", "black!30", "black!70"}; // Header printf("%% Layout: "); print(layout); printf("\n"); printf(latex_header); // Layout for (int i = 0; i < size<0>(layout); ++i) { for (int j = 0; j < size<1>(layout); ++j) { int idx = layout(i,j); printf("\\node[box,fill=%s] at (%d,%d) {%d};\n", color_map[idx % 8], i, j, idx); } } // Labels for (int i = 0, j = -1; i < size<0>(layout); ++i) { printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, i); } for (int j = 0, i = -1; j < size<1>(layout); ++j) { printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, j); } // Footer printf(latex_footer); } // Generic ThrVal 2D Layout to Latex TIKZ -- 8-value color coded by thread template <class Layout, class ThrID> CUTE_HOST_DEVICE void print_latex(Layout const& layout, ThrID const& thr) // (m,n) -> (tid,vid) and tid -> thr_idx { CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{}); char const* latex_header = "\\documentclass[convert]{standalone}\n" "\\usepackage{tikz}\n\n" "\\begin{document}\n" "\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center}]\n\n"; char const* latex_footer = "\\end{tikzpicture}\n" "\\end{document}\n"; char const* color_map[8] = {"{rgb,255:red,175;green,175;blue,255}", "{rgb,255:red,175;green,255;blue,175}", "{rgb,255:red,255;green,255;blue,175}", "{rgb,255:red,255;green,175;blue,175}", "{rgb,255:red,210;green,210;blue,255}", "{rgb,255:red,210;green,255;blue,210}", "{rgb,255:red,255;green,255;blue,210}", "{rgb,255:red,255;green,210;blue,210}"}; // Header printf("%% layout: "); print(layout); printf("\n"); printf("%% thrid: "); print(thr); printf("\n\n"); printf(latex_header); // Layout for (int i = 0; i < size<0>(layout); ++i) { for (int j = 0; j < size<1>(layout); ++j) { int thrid = layout(i,j) % size(thr); int val_idx = layout(i,j) / size(thr); int thr_idx = thr(thrid); printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n", color_map[thr_idx % 8], i, j, thr_idx, val_idx); } } // Labels for (int i = 0, j = -1; i < size<0>(layout); ++i) { 
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, i); } for (int j = 0, i = -1; j < size<1>(layout); ++j) { printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, j); } // Footer printf(latex_footer); } } // end namespace cute // // Extended Layouts // #include <cute/swizzle_layout.hpp>
cutlass/include/cute/layout.hpp/0
{ "file_path": "cutlass/include/cute/layout.hpp", "repo_id": "cutlass", "token_count": 24507 }
17
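// -----------------------------------------------------------------------------------------------
// [Editorial example -- not part of the original files] A minimal host-side sketch of the tiling
// algebra from layout.hpp above: complement, and zipped_divide as the zipped counterpart of
// logical_divide. Same assumptions as the previous example (CuTe headers on the include path,
// C++17 host compiler); the layouts quoted in the comments were worked out by hand and are
// illustrative.

#include <cstdio>
#include <cute/layout.hpp>

inline int layout_tiling_example()
{
  using namespace cute;

  // complement(4:1, 24): the strided layout covering what 4:1 misses within [0,24)
  print(complement(Layout<_4,_1>{}, Int<24>{})); print("\n");   // _6:_4

  // Divide an 8x8 column-major layout into 4x2 tiles
  auto A     = make_layout(make_shape(Int<8>{}, Int<8>{}));     // (_8,_8):(_1,_8)
  auto tiler = make_shape(Int<4>{}, Int<2>{});                  // 4x2 tile shape
  auto ld    = zipped_divide(A, tiler);
  print(ld); print("\n");   // ((_4,_2),(_2,_4)):((_1,_8),(_4,_16))
                            // mode-0 indexes within one 4x2 tile,
                            // mode-1 indexes the 2x4 grid of tiles
  printf("tiles = %d\n", int(size<1>(ld)));                     // 8

  return 0;
}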
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/container/tuple.hpp> #include <cute/algorithm/tuple_algorithms.hpp> #include <cute/numeric/integer_sequence.hpp> #include <cute/numeric/integral_constant.hpp> #include <cute/numeric/math.hpp> namespace cute { // A generic Swizzle functor /* 0bxxxxxxxxxxxxxxxYYYxxxxxxxZZZxxxx * ^--^ MBase is the number of least-sig bits to keep constant * ^-^ ^-^ BBits is the number of bits in the mask * ^---------^ SShift is the distance to shift the YYY mask * (pos shifts YYY to the right, neg shifts YYY to the left) * * e.g. Given * 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxZZxxx * the result is * 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxAAxxx where AA = ZZ xor YY */ template <int BBits, int MBase, int SShift = BBits> struct Swizzle { static constexpr int num_bits = BBits; static constexpr int num_base = MBase; static constexpr int num_shft = SShift; static_assert(num_base >= 0, "MBase must be positive."); static_assert(num_bits >= 0, "BBits must be positive."); static_assert(abs(num_shft) >= num_bits, "abs(SShift) must be more than BBits."); // using 'int' type here to avoid unintentially casting to unsigned... unsure. 
using bit_msk = cute::constant<int, (1 << num_bits) - 1>; using yyy_msk = cute::constant<int, bit_msk{} << (num_base + max(0,num_shft))>; using zzz_msk = cute::constant<int, bit_msk{} << (num_base - min(0,num_shft))>; using msk_sft = cute::constant<int, num_shft>; static constexpr uint32_t swizzle_code = uint32_t(yyy_msk{} | zzz_msk{}); template <class Offset> CUTE_HOST_DEVICE constexpr static auto apply(Offset const& offset) { return offset ^ shiftr(offset & yyy_msk{}, msk_sft{}); // ZZZ ^= YYY } template <class Offset> CUTE_HOST_DEVICE constexpr auto operator()(Offset const& offset) const { return apply(offset); } template <int B, int M, int S> CUTE_HOST_DEVICE constexpr auto operator==(Swizzle<B,M,S> const&) const { return B == BBits && M == MBase && S == SShift; } }; // // make_swizzle<0b1000, 0b0100>() -> Swizzle<1,2,1> // make_swizzle<0b11000000, 0b00000110>() -> Swizzle<2,1,5> // template <uint32_t Y, uint32_t Z> CUTE_HOST_DEVICE constexpr auto make_swizzle() { constexpr uint32_t BZ = popcount(Y); // Number of swizzle bits constexpr uint32_t BY = popcount(Z); // Number of swizzle bits static_assert(BZ == BY, "Number of bits in Y and Z don't match"); constexpr uint32_t TZ_Y = countr_zero(Y); // Number of trailing zeros in Y constexpr uint32_t TZ_Z = countr_zero(Z); // Number of trailing zeros in Z constexpr uint32_t M = cute::min(TZ_Y, TZ_Z) % 32; constexpr int32_t S = int32_t(TZ_Y) - int32_t(TZ_Z); // Difference in trailing zeros static_assert((Y | Z) == Swizzle<BZ,M,S>::swizzle_code, "Something went wrong."); return Swizzle<BZ,M,S>{}; } template <int B0, int M0, int S0, int B1, int M1, int S1> CUTE_HOST_DEVICE constexpr auto composition(Swizzle<B0,M0,S0>, Swizzle<B1,M1,S1>) { static_assert(S0 == S1, "Can only merge swizzles of the same shift."); constexpr uint32_t Y = Swizzle<B0,M0,S0>::yyy_msk::value ^ Swizzle<B1,M1,S1>::yyy_msk::value; constexpr uint32_t Z = Swizzle<B0,M0,S0>::zzz_msk::value ^ Swizzle<B1,M1,S1>::zzz_msk::value; return make_swizzle<Y,Z>(); //return ComposedFn<Swizzle<B0,M0,S0>, Swizzle<B1,M1,S1>>{}; } // // Utility for slicing and swizzle "offsets" // // For swizzle functions, it is often needed to keep track of which bits are // consumed and which bits are free. Furthermore, it is useful to know whether // each of these bits is known statically or dynamically. // MixedBits is an 32-bit unsigned integer class where some bits are known statically // and some bits are known dynamically. These sets of bits are disjoint and it is // known statically which bits are known dynamically. // MixedBits can only be manipulated through bitwise operations // Abstract value: StaticInt | (dynamic_int_ & StaticFlags) template <uint32_t StaticInt, uint32_t StaticFlags> // 0: static, 1: dynamic struct MixedBits { // Representation invariants static_assert(StaticFlags != 0, "Should be at least one dynamic bit in MixedBits."); static_assert((StaticInt & StaticFlags) == 0, "No static/dynamic overlap allowed in MixedBits."); uint32_t dynamic_int_; // assert((dynamic_int_ & ~StaticFlags) == 0); CUTE_HOST_DEVICE constexpr operator uint32_t() const noexcept { return StaticInt | dynamic_int_; } }; // Return a value representing (C<s>{} | (d & C<f>)) potentially using MixedBits to track s and f. // This maker does allow ((s & f) != 0) and enforces the MixedBits invariant before creation. 
template <auto s, class DynamicType, auto f> CUTE_HOST_DEVICE constexpr auto make_mixed_bits(C<s>, DynamicType const& d, C<f>) { static_assert(is_integral<DynamicType>::value); constexpr uint32_t new_f = uint32_t(f) & ~uint32_t(s); // StaticBits take precedence, M<0,f>{d} | C<s>{} if constexpr (new_f == 0 || is_static<DynamicType>::value) { return C<s>{} | (d & C<new_f>{}); // Just return a static int } else { return MixedBits<s, new_f>{uint32_t(d) & new_f}; // MixedBits } CUTE_GCC_UNREACHABLE; } // // Operators // // Equality template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator==(MixedBits<S0,F0> const& m, C<S1>) { return (S0 == (uint32_t(S1) & ~F0)) && (m.dynamic_int_ == (uint32_t(S1) & F0)); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator==(C<S1> s, MixedBits<S0,F0> const& m) { return m == s; } // Bitwise AND template <uint32_t S0, uint32_t F0, uint32_t S1, uint32_t F1> CUTE_HOST_DEVICE constexpr auto operator&(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1) { // Truth table for (S0,D0,F0) & (S1,D1,F1) -> (S,D,F) // S0D0F0 | 0X0 | 001 | 011 | 1X0 | // S1D1F1 // 0X0 | 0X0 | 0X0 | 0X0 | 0X0 | // 001 | 0X0 | 001 | 001 | 001 | // 011 | 0X0 | 001 | 011 | 011 | // 1X0 | 0X0 | 001 | 011 | 1X0 | return make_mixed_bits(C<S0 & S1>{}, //(S0 | m0.dynamic_int_) & (S1 | m1.dynamic_int_), ((S1 & F0) & m0.dynamic_int_) | ((S0 & F1) & m1.dynamic_int_) | (m0.dynamic_int_ & m1.dynamic_int_), C<(S1 & F0) | (S0 & F1) | (F0 & F1)>{}); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator&(MixedBits<S0,F0> const& m, C<S1>) { return make_mixed_bits(C<S0 & uint32_t(S1)>{}, m.dynamic_int_, C<F0 & uint32_t(S1)>{}); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator&(C<S1> s, MixedBits<S0,F0> const& m) { return m & s; } // Bitwise OR template <uint32_t S0, uint32_t F0, uint32_t S1, uint32_t F1> CUTE_HOST_DEVICE constexpr auto operator|(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1) { // Truth table for (S0,D0,F0) | (S1,D1,F1) -> (S,D,F) // S0D0F0 | 0X0 | 001 | 011 | 1X0 | // S1D1F1 // 0X0 | 0X0 | 001 | 011 | 1X0 | // 001 | 001 | 001 | 011 | 1X0 | // 011 | 011 | 011 | 011 | 1X0 | // 1X0 | 1X0 | 1X0 | 1X0 | 1X0 | return make_mixed_bits(C<S0 | S1>{}, ((~S1 & F0) & m0.dynamic_int_) | ((~S0 & F1) & m1.dynamic_int_), C<(~S0 & F1) | (~S1 & F0)>{}); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator|(MixedBits<S0,F0> const& m, C<S1>) { return make_mixed_bits(C<S0 | uint32_t(S1)>{}, m.dynamic_int_, C<F0 & ~uint32_t(S1)>{}); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator|(C<S1> s, MixedBits<S0,F0> const& m) { return m | s; } // Bitwise XOR template <uint32_t S0, uint32_t F0, uint32_t S1, uint32_t F1> CUTE_HOST_DEVICE constexpr auto operator^(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1) { // Truth table for (S0,D0,F0) ^ (S1,D1,F1) -> (S,D,F) // S0D0F0 | 0X0 | 001 | 011 | 1X0 | // S1D1F1 // 0X0 | 0X0 | 001 | 011 | 1X0 | // 001 | 001 | 001 | 011 | 011 | // 011 | 011 | 011 | 001 | 001 | // 1X0 | 1X0 | 011 | 001 | 0X0 | return make_mixed_bits(C<(~S0 & S1 & ~F0) | (S0 & ~S1 & ~F1)>{}, (S0 | m0.dynamic_int_) ^ (S1 | m1.dynamic_int_), C<F0 | F1>{}); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator^(MixedBits<S0,F0> const& m, C<S1>) { return make_mixed_bits(C<(~S0 & uint32_t(S1) & ~F0) | (S0 & ~uint32_t(S1))>{}, (S0 | m.dynamic_int_) ^ uint32_t(S1), 
C<F0>{}); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator^(C<S1> s, MixedBits<S0,F0> const& m) { return m ^ s; } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator<<(MixedBits<S0,F0> const& m, C<S1>) { return make_mixed_bits(C<(S0 << S1)>{}, m.dynamic_int_ << S1, C<(F0 << S1)>{}); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto operator>>(MixedBits<S0,F0> const& m, C<S1>) { return make_mixed_bits(C<(S0 >> S1)>{}, m.dynamic_int_ >> S1, C<(F0 >> S1)>{}); } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto shiftl(MixedBits<S0,F0> const& m, C<S1> s) { if constexpr (S1 >= 0) { return m << s; } else { return m >> -s; } } template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto shiftr(MixedBits<S0,F0> const& m, C<S1> s) { if constexpr (S1 >= 0) { return m >> s; } else { return m << -s; } } // // upcast and downcast // template <uint32_t S0, uint32_t F0, auto S1> CUTE_HOST_DEVICE constexpr auto safe_div(MixedBits<S0,F0> const& m, C<S1> s) { static_assert(has_single_bit(uint32_t(S1)), "Only divide MixedBits by powers of two."); return make_mixed_bits(safe_div(C<S0>{}, s), safe_div(m.dynamic_int_, s), safe_div(C<F0>{}, s)); } template <uint32_t N, uint32_t S0, uint32_t F0> CUTE_HOST_DEVICE constexpr auto upcast(MixedBits<S0,F0> const& m) { static_assert(has_single_bit(N), "Only divide MixedBits by powers of two."); return safe_div(m, C<N>{}); } template <uint32_t N, class T, __CUTE_REQUIRES(cute::is_integral<T>::value)> CUTE_HOST_DEVICE constexpr auto upcast(T const& m) { return safe_div(m, C<N>{}); } template <uint32_t N, uint32_t S0, uint32_t F0> CUTE_HOST_DEVICE constexpr auto downcast(MixedBits<S0,F0> const& m) { static_assert(has_single_bit(N), "Only scale MixedBits by powers of two."); return make_mixed_bits(C<S0 * N>{}, m.dynamic_int_ * N, C<F0 * N>{}); } template <uint32_t N, class T, __CUTE_REQUIRES(cute::is_integral<T>::value)> CUTE_HOST_DEVICE constexpr auto downcast(T const& m) { return m * C<N>{}; } // // Convert a Pow2Layout+Coord to a MixedBits // template <class Shape, class Stride, class Coord> CUTE_HOST_DEVICE constexpr auto to_mixed_bits(Shape const& shape, Stride const& stride, Coord const& coord) { if constexpr (is_tuple<Shape>::value && is_tuple<Stride>::value && is_tuple<Coord>::value) { static_assert(tuple_size<Shape>::value == tuple_size<Stride>::value, "Mismatched ranks"); static_assert(tuple_size<Shape>::value == tuple_size<Coord >::value, "Mismatched ranks"); return transform_apply(shape, stride, coord, [](auto const& s, auto const& d, auto const& c) { return to_mixed_bits(s,d,c); }, [](auto const&... 
a) { return (a ^ ...); }); } else if constexpr (is_integral<Shape>::value && is_integral<Stride>::value && is_integral<Coord>::value) { static_assert(decltype(shape*stride)::value == 0 || has_single_bit(decltype(shape*stride)::value), "Requires pow2 shape*stride."); return make_mixed_bits(Int<0>{}, coord * stride, (shape - Int<1>{}) * stride); } else { static_assert(is_integral<Shape>::value && is_integral<Stride>::value && is_integral<Coord>::value, "Either Shape, Stride, and Coord must be all tuples, or they must be all integral (in the sense of cute::is_integral)."); } CUTE_GCC_UNREACHABLE; } template <class Layout, class Coord> CUTE_HOST_DEVICE constexpr auto to_mixed_bits(Layout const& layout, Coord const& coord) { return to_mixed_bits(layout.shape(), layout.stride(), idx2crd(coord, layout.shape())); } // // Display utilities // template <int B, int M, int S> CUTE_HOST_DEVICE void print(Swizzle<B,M,S> const&) { printf("Sw<%d,%d,%d>", B, M, S); } template <uint32_t S, uint32_t F> CUTE_HOST_DEVICE void print(MixedBits<S,F> const& m) { printf("M_%u|(%u&%u)=%u", S, m.dynamic_int_, F, uint32_t(m)); } #if !defined(__CUDACC_RTC__) template <int B, int M, int S> CUTE_HOST std::ostream& operator<<(std::ostream& os, Swizzle<B,M,S> const&) { return os << "Sw<" << B << "," << M << "," << S << ">"; } template <uint32_t S, uint32_t F> CUTE_HOST std::ostream& operator<<(std::ostream& os, MixedBits<S,F> const& m) { return os << "M_" << S << "|(" << m.dynamic_int_ << "&" << F << ")=" << uint32_t(m); } #endif // !defined(__CUDACC_RTC__) } // end namespace cute
cutlass/include/cute/swizzle.hpp/0
{ "file_path": "cutlass/include/cute/swizzle.hpp", "repo_id": "cutlass", "token_count": 6999 }
18
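
The MixedBits operators above keep three pieces of state per value: the known static bits S, a dynamic integer, and a mask F marking which bits are dynamic, with the represented value being S | (dynamic & F). As a sanity check of the AND rule (the truth table in operator&), the following is a minimal standalone model in plain C++ — the MB struct and mixed_and function are illustrative stand-ins, not CuTe types — that exhaustively verifies the rule against ordinary integer AND over 4-bit patterns:

// Standalone model of the MixedBits AND rule above (illustrative only; not the CuTe types).
#include <cassert>
#include <cstdint>
#include <cstdio>

struct MB {            // represented value = s | (d & f), with s and f disjoint
  uint32_t s, d, f;
  uint32_t value() const { return s | (d & f); }
};

// Mirrors operator&(MixedBits<S0,F0>, MixedBits<S1,F1>): new static bits, dynamic bits, dynamic mask.
MB mixed_and(MB a, MB b) {
  MB r;
  r.s = a.s & b.s;
  r.f = (b.s & a.f) | (a.s & b.f) | (a.f & b.f);
  r.d = ((b.s & a.f) & a.d) | ((a.s & b.f) & b.d) | (a.d & b.d);
  return r;
}

int main() {
  // Exhaustively check 4-bit patterns: the mixed result must equal plain integer AND.
  for (uint32_t s0 = 0; s0 < 16; ++s0)
  for (uint32_t f0 = 0; f0 < 16; ++f0) {
    if (s0 & f0) continue;                   // static and dynamic masks never overlap
    for (uint32_t s1 = 0; s1 < 16; ++s1)
    for (uint32_t f1 = 0; f1 < 16; ++f1) {
      if (s1 & f1) continue;
      for (uint32_t d0 = 0; d0 < 16; ++d0)
      for (uint32_t d1 = 0; d1 < 16; ++d1) {
        MB a{s0, d0, f0}, b{s1, d1, f1};
        assert(mixed_and(a, b).value() == (a.value() & b.value()));
      }
    }
  }
  std::printf("MixedBits AND model matches integer AND\n");
  return 0;
}
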
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Matrix multiply */ #pragma once #include "cutlass/arch/mma.h" #include "cutlass/complex.h" #include "cutlass/quaternion.h" #include "cutlass/functional.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma<gemm::GemmShape<1, 1, 1>, 1, float, LayoutA, float, LayoutB, float, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAdd; using ElementC = float; CUTLASS_HOST_DEVICE void operator()( Array<float, 1> &d, Array<float, 1> const &a, Array<float, 1> const &b, Array<float, 1> const &c ) { d[0] = a[0] * b[0] + c[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma<gemm::GemmShape<1, 1, 1>, 1, double, LayoutA, double, LayoutB, double, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAdd; using ElementC = double; CUTLASS_HOST_DEVICE void operator()( Array<double, 1> &d, Array<double, 1> const &a, Array<double, 1> const &b, Array<double, 1> const &c ) { d[0] = a[0] * b[0] + c[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma<gemm::GemmShape<1, 1, 1>, 1, int, LayoutA, int, LayoutB, int, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAdd; using ElementC = int; CUTLASS_HOST_DEVICE void operator()( Array<int, 1> &d, Array<int, 1> const &a, Array<int, 1> const &b, Array<int, 1> const &c ) { d[0] = a[0] * b[0] + c[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma< gemm::GemmShape<1, 1, 1>, 1, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAddComplex; using ElementC = complex<float>; CUTLASS_HOST_DEVICE void operator()( Array<complex<float>, 1> &d, Array<complex<float>, 1> const &a, Array<complex<float>, 1> const &b, Array<complex<float>, 1> const &c ) { d[0].real() = a[0].real() * b[0].real() + c[0].real(); d[0].imag() = a[0].imag() * b[0].real() + c[0].imag(); d[0].real() = -a[0].imag() * b[0].imag() + d[0].real(); d[0].imag() = a[0].real() * b[0].imag() + d[0].imag(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma< gemm::GemmShape<1, 1, 1>, 1, complex<float>, LayoutA, float, LayoutB, complex<float>, 
LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAddComplex; using ElementC = complex<float>; CUTLASS_HOST_DEVICE void operator()( Array<complex<float>, 1> &d, Array<complex<float>, 1> const &a, Array<float, 1> const &b, Array<complex<float>, 1> const &c ) { d[0].real() = a[0].real() * b[0] + c[0].real(); d[0].imag() = a[0].imag() * b[0] + c[0].imag(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma< gemm::GemmShape<1, 1, 1>, 1, float, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAddComplex; using ElementC = complex<float>; CUTLASS_HOST_DEVICE void operator()( Array<complex<float>, 1> &d, Array<float, 1> const &a, Array<complex<float>, 1> const &b, Array<complex<float>, 1> const &c ) { d[0].real() = a[0] * b[0].real() + c[0].real(); d[0].imag() = a[0] * b[0].imag() + d[0].imag(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma< gemm::GemmShape<1, 1, 1>, 1, complex<double>, LayoutA, complex<double>, LayoutB, complex<double>, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAddComplex; using ElementC = complex<double>; CUTLASS_HOST_DEVICE void operator()( Array<complex<double>, 1> &d, Array<complex<double>, 1> const &a, Array<complex<double>, 1> const &b, Array<complex<double>, 1> const &c ) { d[0].real() = a[0].real() * b[0].real() + c[0].real(); d[0].imag() = a[0].imag() * b[0].real() + c[0].imag(); d[0].real() = -a[0].imag() * b[0].imag() + d[0].real(); d[0].imag() = a[0].real() * b[0].imag() + d[0].imag(); } }; /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma< gemm::GemmShape<1, 1, 1>, 1, complex<double>, LayoutA, double, LayoutB, complex<double>, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAddComplex; using ElementC = complex<double>; CUTLASS_HOST_DEVICE void operator()( Array<complex<double>, 1> &d, Array<complex<double>, 1> const &a, Array<double, 1> const &b, Array<complex<double>, 1> const &c ) { d[0].real() = a[0].real() * b[0] + c[0].real(); d[0].imag() = a[0].imag() * b[0] + c[0].imag(); } }; /// Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma< gemm::GemmShape<1, 1, 1>, 1, double, LayoutA, complex<double>, LayoutB, complex<double>, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAddComplex; using ElementC = complex<double>; CUTLASS_HOST_DEVICE void operator()( Array<complex<double>, 1> &d, Array<double, 1> const &a, Array<complex<double>, 1> const &b, Array<complex<double>, 1> const &c ) { d[0].real() = a[0] * b[0].real() + c[0].real(); d[0].imag() = a[0] * b[0].imag() + d[0].imag(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// 
Matrix multiply-add operation template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma<gemm::GemmShape<1, 1, 1>, 1, half_t, LayoutA, half_t, LayoutB, float, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAdd; using ElementC = float; CUTLASS_HOST_DEVICE void operator()( Array<float, 1> &d, Array<half_t, 1> const &a, Array<half_t, 1> const &b, Array<float, 1> const &c ) { d[0] = float(a[0]) * float(b[0]) + c[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation for Quaternions template < /// Layout of A matrix typename LayoutA, /// Layout of B matrix typename LayoutB, /// Layout of C matrix typename LayoutC > struct Mma<gemm::GemmShape<1, 1, 1>, 1, Quaternion<float>, LayoutA, Quaternion<float>, LayoutB, Quaternion<float>, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 1, 1>; using Operator = OpMultiplyAdd; using Element = Quaternion<float>; using ElementC = Element; CUTLASS_HOST_DEVICE void operator()( Array<Element, 1> &d, Array<Element, 1> const &a, Array<Element, 1> const &b, Array<Element, 1> const &c ) { multiply_add<Element, Element, Element> op; d[0] = op(a[0], b[0], c[0]); } }; } } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/arch/mma_sm50.h/0
{ "file_path": "cutlass/include/cutlass/arch/mma_sm50.h", "repo_id": "cutlass", "token_count": 3887 }
19
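
The complex-valued Mma specializations above expand d = a * b + c into a sequence of real multiply-adds so that each statement maps onto one fused multiply-add. A small host-side check of that expansion, using std::complex as the reference (illustrative only; the real specializations operate on cutlass::complex fragments):

// Scalar check of the complex multiply-accumulate sequence used above.
#include <cassert>
#include <cmath>
#include <complex>

int main() {
  std::complex<float> a(1.5f, -2.0f), b(0.25f, 3.0f), c(-1.0f, 0.5f);

  // Same four steps as the complex<float> x complex<float> specialization:
  float d_real = a.real() * b.real() + c.real();
  float d_imag = a.imag() * b.real() + c.imag();
  d_real = -a.imag() * b.imag() + d_real;
  d_imag =  a.real() * b.imag() + d_imag;

  std::complex<float> ref = a * b + c;   // reference result
  assert(std::abs(d_real - ref.real()) < 1e-5f);
  assert(std::abs(d_imag - ref.imag()) < 1e-5f);
  return 0;
}
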
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Matrix multiply */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/layout/matrix.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { //////////////////////////////////////////////////////////////////////////////// // // WMMA template structure defines nvcuda::wmma::fragments and static assert for // wmma native instruction sizes supported for int8_t // //////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename LayoutA_, typename LayoutB_, typename LayoutC_> struct Wmma< Shape_, ///< Size of the matrix product (concept: GemmShape) int8_t, ///< ElementA LayoutA_, ///< LayoutA int8_t, ///< ElementB LayoutB_, ///< LayoutB int32_t, ///< ElementC LayoutC_, ///< LayoutC cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc) > { #if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED) using Shape = Shape_; using ElementA = int8_t; using LayoutA = LayoutA_; using ElementB = int8_t; using LayoutB = LayoutB_; using ElementC = int32_t; using LayoutC = LayoutC_; using Operator = cutlass::arch::OpMultiplyAdd; using ArchTag = arch::Sm72; // check supported wmma shape for the given multiplicand data types static_assert( platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value || platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value || platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value, "Supported list of wmma operator shape for s8 multiplicands are: 16x16x16, 8x32x16, and 32x8x16"); // Wmma Fragment using FragmentA = nvcuda::wmma::fragment< nvcuda::wmma::matrix_a, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementA>::Type, typename CutlassToWmmaLayout<LayoutA>::Layout>; using FragmentB = nvcuda::wmma::fragment< nvcuda::wmma::matrix_b, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementB>::Type, typename CutlassToWmmaLayout<LayoutB>::Layout>; using FragmentC = nvcuda::wmma::fragment< nvcuda::wmma::accumulator, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementC>::Type>; /// Performs a nvcuda::wmma matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &D, FragmentA const &A, FragmentB const &B, FragmentC const &C) const { nvcuda::wmma::mma_sync(D, A, B, C); } #else static_assert(false, "wmma.mma.sync interger type multiplicands is avialable only for SM72 and beyond"); #endif }; //////////////////////////////////////////////////////////////////////////////// // // WMMA template structure defines nvcuda::wmma::fragments and static assert for // wmma native instruction sizes supported for uint8_t // //////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename LayoutA_, typename LayoutB_, typename LayoutC_> struct Wmma< Shape_, ///< Size of the matrix product (concept: GemmShape) uint8_t, ///< ElementA LayoutA_, ///< LayoutA uint8_t, ///< ElementB LayoutB_, ///< LayoutB int32_t, ///< ElementC LayoutC_, ///< LayoutC cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc) > { #if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED) using Shape = Shape_; using ElementA = uint8_t; using LayoutA = LayoutA_; using ElementB = uint8_t; using LayoutB = LayoutB_; using ElementC = int32_t; using LayoutC = LayoutC_; using Operator = cutlass::arch::OpMultiplyAdd; using ArchTag = arch::Sm72; // check supported wmma shape for the 
given multiplicand data types static_assert( platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value || platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value || platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value, "Supported list of wmma operator shape for u8 multiplicands are: 16x16x16, 8x32x16, and 32x8x16"); // Wmma Fragment using FragmentA = nvcuda::wmma::fragment< nvcuda::wmma::matrix_a, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementA>::Type, typename CutlassToWmmaLayout<LayoutA>::Layout>; using FragmentB = nvcuda::wmma::fragment< nvcuda::wmma::matrix_b, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementB>::Type, typename CutlassToWmmaLayout<LayoutB>::Layout>; using FragmentC = nvcuda::wmma::fragment< nvcuda::wmma::accumulator, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementC>::Type>; /// Performs a nvcuda::wmma matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &D, FragmentA const &A, FragmentB const &B, FragmentC const &C) const { nvcuda::wmma::mma_sync(D, A, B, C); } #else static_assert(false, "wmma.mma.sync for integer multiplicands is available only for SM72 and beyond"); #endif }; } // namespace arch } // namespace cutlass
cutlass/include/cutlass/arch/wmma_sm72.h/0
{ "file_path": "cutlass/include/cutlass/arch/wmma_sm72.h", "repo_id": "cutlass", "token_count": 3101 }
20
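
The Wmma structures above are thin wrappers: they alias nvcuda::wmma fragment types through CutlassToWmmaDataType / CutlassToWmmaLayout and forward operator() to nvcuda::wmma::mma_sync. Stripped of the wrapper, the underlying usage looks roughly like the following single-tile sketch (one warp, one 16x16x16 s8 tile, row-major A and column-major B, compiled for sm_72 or newer; not a complete GEMM):

// Minimal raw nvcuda::wmma usage that the Wmma<..., int8_t, ...> wrapper above boils down to.
#include <mma.h>

__global__ void wmma_s8_16x16x16(const signed char* A, const signed char* B, int* C) {
  using namespace nvcuda;
  wmma::fragment<wmma::matrix_a, 16, 16, 16, signed char, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, signed char, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, int> c_frag;

  wmma::fill_fragment(c_frag, 0);            // C = 0
  wmma::load_matrix_sync(a_frag, A, 16);     // leading dimension 16
  wmma::load_matrix_sync(b_frag, B, 16);
  wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);   // the call Wmma::operator() forwards to
  wmma::store_matrix_sync(C, c_frag, 16, wmma::mem_row_major);
}
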
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level fused activation's scale+bias+relu and implicit GEMM convolution definitions that combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h" #include "cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h" #include "cutlass/gemm/warp/scale_bias_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for fused batch norm and Conv2dFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kUnity > struct DefaultConv2dFpropFusion; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv2dFpropFusion < ElementA, LayoutA, ElementB, LayoutB, ElementScaleBias, LayoutScaleBias, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using IteratorScaleBias = cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; using SmemIteratorScaleBias = cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static int const kThreadCount = 32; // Warp-level iterators to load scale and bias vectors using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator< MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias, LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>, typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount, MmaCore::WarpCount::kK>; // Define the Mma using Mma = threadblock::ImplicitGemmFpropFusionMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, IteratorScaleBias, SmemIteratorScaleBias, arch::CacheOperation::Always, MmaPolicy, WarpIteratorScaleBias, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimzed IteratorAlgorithm and /// multistage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv2dFpropFusion < ElementA, LayoutA, ElementB, LayoutB, ElementScaleBias, LayoutScaleBias, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag >; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using IteratorScaleBias = cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; using SmemIteratorScaleBias = cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static int const kThreadCount = 32; // Warp-level iterators to load scale and bias vectors using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator< MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias, LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>, typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount, MmaCore::WarpCount::kK>; // Define the Mma using Mma = threadblock::ImplicitGemmFpropFusionMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, IteratorScaleBias, SmemIteratorScaleBias, arch::CacheOperation::Always, MmaPolicy, WarpIteratorScaleBias, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass 
/////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_fusion.h/0
{ "file_path": "cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_fusion.h", "repo_id": "cutlass", "token_count": 3829 }
21
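
DefaultConv2dFpropFusion, like the other Fprop kernels, lowers the convolution to an implicit GEMM whose extents come from the standard Fprop mapping: M = N*P*Q output pixels, N = K filters, and a reduction of length R*S*C. A plain C++ sketch of that mapping (the struct and field names are illustrative, not cutlass::conv::Conv2dProblemSize):

// Implied GEMM extents for the Fprop kernels above (standard implicit-GEMM mapping).
#include <cstdio>

struct Conv2dProblem { int N, H, W, C, K, R, S, P, Q; };  // P/Q = output height/width

struct GemmExtent { int M, N, K; };

GemmExtent implied_gemm_fprop(const Conv2dProblem& p) {
  // Each output pixel (n, p, q) is one GEMM row; each filter k is one GEMM column;
  // the reduction runs over filter taps and input channels (r, s, c).
  return { p.N * p.P * p.Q, p.K, p.R * p.S * p.C };
}

int main() {
  Conv2dProblem p{ /*N*/1, /*H*/56, /*W*/56, /*C*/64, /*K*/128, /*R*/3, /*S*/3, /*P*/56, /*Q*/56 };
  GemmExtent g = implied_gemm_fprop(p);
  std::printf("GEMM MxNxK = %d x %d x %d\n", g.M, g.N, g.K);  // 3136 x 128 x 576
  return 0;
}
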
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level Depthwise implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/kernel/direct_convolution.h" #include "cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/depthwise_fprop_pipelined.h" // Direct Conv Related Header files #include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_optimized.h" #include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h" #include "cutlass/conv/threadblock/depthwise_fprop_filter_tile_access_iterator_direct_conv_optimized.h" #include "cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for DepthwiseFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic, conv::StrideSupport StrideSupport = StrideSupport::kUnity, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = cutlass::sizeof_bits<ElementB>::value / cutlass::sizeof_bits<ElementB>::value > struct DefaultDepthwiseFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for DepthwiseFprop with direct convolution algorithm template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename ThreadBlockOutputShape, typename FilterShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic, conv::StrideSupport StrideSupport = StrideSupport::kUnity, // MatrixShape<Height, Width> typename StrideShape = cutlass::MatrixShape<-1, -1>, // MatrixShape< Height, Width> typename DilationShape = cutlass::MatrixShape<-1, -1>, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultDepthwiseDirect2dConvFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassSimt convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Depthwise specialization for Analytic IteratorAlgorithm template < typename ElementA, typename LayoutA, typename ElementB, typename 
LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultDepthwiseFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, // cutlass::arch::OpMultiplyAdd IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::conv::threadblock::DepthwiseMmaCoreWithLaneAccessSize< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 128, sizeof_bits<ElementB>::value, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB, cutlass::conv::GroupMode::kDepthwise > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::DepthwiseFpropPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, cutlass::conv::GroupMode::kDepthwise >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Depthwise specialization for direct 2d conv implementation, /// multiple stage pipeline, and SIMT-based mainloop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename ThreadBlockOutputShape, typename FilterShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, typename StrideShape, typename DilationShape, int AlignmentA, int AlignmentB > struct DefaultDepthwiseDirect2dConvFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, ThreadBlockOutputShape, FilterShape, 
WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, StrideShape, DilationShape, AlignmentA, AlignmentB > { // One warp handles the entrie groups per cta. static_assert(ThreadblockShape::kN == WarpShape::kN, "ThreadblockShape::kN should be same as WarpShape::kN "); static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount, "ThreadblockShape::kK and WarpShape::kK should be same as filter size"); static_assert(ThreadblockShape::kM % WarpShape::kM == 0, "ThreadblockShape::kM must be divisible by WarpShape shape::kM"); static_assert(ThreadBlockOutputShape::kN, "ThreadBlockOutputShape::kN should be 1"); // Define the core components from GEMM using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize< ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 128, 128, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta> ThreadBlockOutputShape, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; using ThreadOutputShape = typename MmaCore::ThreadOutputShape; static cutlass::arch::CacheOperation::Kind const CacheOpA = ((sizeof_bits<ElementA>::value * AlignmentA) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? 
cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt< ThreadblockShape, // < outputShape:KMNK, groups per cta> WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount, ThreadOutputShape, ThreadBlockOutputShape >::Epilogue; // Define the Mma using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage< ThreadblockShape, IteratorA, SmemIteratorA, CacheOpA, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages, Epilogue >; // Define the kernel using Kernel = cutlass::conv::kernel::DirectConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, cutlass::conv::GroupMode::kDepthwise, ThreadBlockOutputShape >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Depthwise specialization for direct 2d conv implementation, /// multiple stage pipeline, and SIMT-based mainloop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename ThreadBlockOutputShape, typename FilterShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, typename StrideShape, typename DilationShape, int AlignmentA, int AlignmentB > struct DefaultDepthwiseDirect2dConvFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kFixedStrideDilation, StrideSupport, StrideShape, DilationShape, AlignmentA, AlignmentB > { // One warp handles the entrie groups per cta. 
static_assert(ThreadblockShape::kN == WarpShape::kN, "ThreadblockShape::kN should be same as WarpShape::kN "); static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount, "ThreadblockShape::kK and WarpShape::kK should be same as filter size"); static_assert(ThreadblockShape::kM % WarpShape::kM == 0, "ThreadblockShape::kM must be divisible by WarpShape shape::kM"); static_assert(ThreadBlockOutputShape::kN, "ThreadBlockOutputShape::kN should be 1"); static_assert(StrideShape::kRow >= 0 && StrideShape::kColumn >= 0, "Stride should be fixed"); static_assert(DilationShape::kRow >= 0 && DilationShape::kColumn >= 0, "Stride should be fixed"); // Activations loaded by threadblock static int const ActivationShapeH = (ThreadBlockOutputShape::kH - 1) * StrideShape::kRow + (FilterShape::kRow - 1) * DilationShape::kRow + 1; static int const ActivationShapeW = (ThreadBlockOutputShape::kW - 1) * StrideShape::kColumn + (FilterShape::kColumn - 1) * DilationShape::kColumn + 1; using ActivationShape = cutlass::conv::TensorNHWCShape<1, ActivationShapeH, ActivationShapeW, ThreadblockShape::kN >; // Define the core components from GEMM using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize< ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 128, 128, Stages, MathOperatorTag, IteratorAlgorithm::kFixedStrideDilation, StrideShape, DilationShape, ActivationShape>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation< cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta> ThreadBlockOutputShape, StrideShape, DilationShape, ActivationShape, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; using ThreadOutputShape = typename MmaCore::ThreadOutputShape; static cutlass::arch::CacheOperation::Kind const CacheOpA = ((sizeof_bits<ElementA>::value * AlignmentA) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? 
cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt< ThreadblockShape, // < outputShape:KMNK, groups per cta> WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount, ThreadOutputShape, ThreadBlockOutputShape >::Epilogue; // Define the Mma using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage< ThreadblockShape, IteratorA, SmemIteratorA, CacheOpA, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages, Epilogue, IteratorAlgorithm::kFixedStrideDilation >; // Define the kernel using Kernel = cutlass::conv::kernel::DirectConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, cutlass::conv::GroupMode::kDepthwise, ThreadBlockOutputShape >; }; } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/kernel/default_depthwise_fprop.h/0
{ "file_path": "cutlass/include/cutlass/conv/kernel/default_depthwise_fprop.h", "repo_id": "cutlass", "token_count": 6502 }
22
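
The kFixedStrideDilation specialization above sizes the activation tile staged per threadblock from the output tile, stride, filter, and dilation through ActivationShapeH/W. The same formula as a standalone constexpr sketch, with arbitrary example numbers:

// How the kFixedStrideDilation specialization sizes its per-threadblock activation tile.
constexpr int activation_extent(int out_extent, int stride, int filter_extent, int dilation) {
  return (out_extent - 1) * stride + (filter_extent - 1) * dilation + 1;
}

// A threadblock producing an 8x8 output tile of a 3x3 depthwise conv with stride 2 and
// dilation 1 must stage a 17x17 activation tile:
static_assert(activation_extent(/*ThreadBlockOutputShape::kH*/ 8, /*stride_h*/ 2,
                                /*FilterShape::kRow*/ 3, /*dilation_h*/ 1) == 17, "");
static_assert(activation_extent(/*ThreadBlockOutputShape::kW*/ 8, /*stride_w*/ 2,
                                /*FilterShape::kColumn*/ 3, /*dilation_w*/ 1) == 17, "");

int main() { return 0; }
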
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile) matrix from memory. This iterator assumes TensorNHWC or TensorNCxHWx<Interleave> layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/threadblock/conv2d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename Layout_, typename ThreadMap_, typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> > class Conv2dFpropActivationTileAccessIteratorOptimized { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = Layout_; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using TensorRef = cutlass::TensorRef<Element, Layout>; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 2; using ConvProblemSize = typename conv::Conv2dProblemSize; using Mask = uint64_t; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); // // Simplifying assertions // static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1"); // // Parameters structure // using Params = Conv2dFpropActivationIteratorOptimizedParams<Layout>; private: Params const &params_; Conv2dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; LongIndex iteration_vector_; // One pointer per access char const *pointer_[ThreadMap::Iterations::kStrided]; // current filter position (r, s) int filter_r_; int filter_s_; int filter_c_; Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2]; public: CUTLASS_HOST_DEVICE Conv2dFpropActivationTileAccessIteratorOptimized( Params const &params, Conv2dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles ): params_(params), problem_size_(problem_size), filter_c_(0), filter_r_(0), filter_s_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_c_ = threadblock_offset.column() + thread_coord.contiguous(); int offset_n[ThreadMap::Iterations::kStrided]; int offset_p[ThreadMap::Iterations::kStrided]; int offset_q[ThreadMap::Iterations::kStrided]; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { pointer_[s] = reinterpret_cast<char const *>(ptr); int offset_npq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; // The subseqnet fast_divmod() operations are equivalent to the following logical computation: // // // offset_n[s] = offset_npq / (problem_size_.P * problem_size_.Q); // int residual = offset_npq % 
(problem_size_.P * problem_size_.Q); // // offset_p[s] = residual / problem_size_.Q; // offset_q[s] = residual % problem_size_.Q; // int residual; params.pq_divmod(offset_n[s], residual, offset_npq); params.q_divmod(offset_p[s], offset_q[s], residual); TensorCoord coord = at_(offset_n[s], offset_p[s], offset_q[s], 0, 0); pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8; } clear_mask(); CUTLASS_PRAGMA_NO_UNROLL for (int r = 0; r < problem_size_.R; ++r) { CUTLASS_PRAGMA_UNROLL for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) { int r_ = r; if (problem_size_.mode == Mode::kConvolution) { r_ = problem_size_.R - 1 - r; } int h = offset_p[s_idx] * problem_size_.stride_h - problem_size_.pad_h + r_ * problem_size_.dilation_h; bool pred = (offset_n[s_idx] < problem_size_.N && h >= 0 && h < problem_size_.H); CUTLASS_PRAGMA_UNROLL for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) { masks_[s_idx][v_idx][0] |= (pred << r); } } } CUTLASS_PRAGMA_NO_UNROLL for (int s = 0; s < problem_size_.S; ++s) { CUTLASS_PRAGMA_UNROLL for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) { int s_ = s; if (problem_size_.mode == Mode::kConvolution) { s_ = problem_size_.S - 1 - s; } int w = offset_q[s_idx] * problem_size_.stride_w - problem_size_.pad_w + s_ * problem_size_.dilation_w; bool pred = (w >= 0 && w < problem_size_.W); CUTLASS_PRAGMA_UNROLL for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) { masks_[s_idx][v_idx][1] |= (pred << s); } } } CUTLASS_PRAGMA_UNROLL for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) { clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= problem_size_.C); } set_iteration_index(0); } CUTLASS_HOST_DEVICE static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) { return Params(problem_size, layout, sizeof_bits<Element>::value, {Shape::kRow, Shape::kColumn}, ThreadMap::kThreads, ThreadMap::kElementsPerAccess, {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}); } private: /// Returns the coordinate in the activations tensor X that is correspoinding to // output npq and filter position r, s CUTLASS_HOST_DEVICE TensorCoord at_(int n, int p, int q, int r, int s) const { if (problem_size_.mode == Mode::kConvolution) { r = problem_size_.R - 1 - r; s = problem_size_.S - 1 - s; } int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h; int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w; return TensorCoord(n, h, w, filter_c_); } /// Adds a pointer offset in units of element CUTLASS_HOST_DEVICE void add_byte_offset_(LongIndex byte_offset) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { pointer_[s] += byte_offset; } } public: /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8); } CUTLASS_HOST_DEVICE void advance() { int next_idx = 0; // moves to the next tile ++filter_s_; if (filter_s_ == problem_size_.S) { filter_s_ = 0; 
++filter_r_; if (filter_r_ < problem_size_.R) { next_idx = 1; } else { filter_r_ = 0; next_idx = 2; } } add_byte_offset_(params_.inc_next[next_idx]); if (next_idx == 2) { filter_c_ += params_.filter_c_delta; } CUTLASS_PRAGMA_UNROLL for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) { clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= problem_size_.C); } } /// Clears the predicates CUTLASS_HOST_DEVICE void clear_mask(bool clear = true) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { masks_[s][v][0] = clear ? 0 : masks_[s][v][0]; masks_[s][v][1] = clear ? 0 : masks_[s][v][1]; } } } /// Clears the predicates CUTLASS_HOST_DEVICE void clear_mask(int v, bool clear = true) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { masks_[s][v][0] = clear ? 0 : masks_[s][v][0]; masks_[s][v][1] = clear ? 0 : masks_[s][v][1]; } } CUTLASS_HOST_DEVICE bool valid() { return (masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) && (masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_)); } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_; } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv2dFpropActivationTileAccessIteratorOptimized &operator++() { ++iteration_vector_; if (iteration_vector_ < kAccessesPerVector) { return *this; } iteration_vector_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(Conv2dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if ((problem_size.C / problem_size.groups) % AccessType::kElements) { return Status::kErrorInvalidProblem; } if (platform::is_same<Layout, layout::TensorNCxHWx<32>>::value) { if (problem_size.C % 32) { return Status::kErrorInvalidProblem; } } if (platform::is_same<Layout, layout::TensorNCxHWx<64>>::value) { if (problem_size.C % 64) { return Status::kErrorInvalidProblem; } } // Conv2dFpropActivationTileAccessIteratorOptimized has constraint on filter positions // due to the number of mask bits. if (problem_size.R > 32 || problem_size.S > 32) { return Status::kErrorNotSupported; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
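// The iterator above never recomputes (n, p, q) or bounds predicates in its hot loop: it
// decomposes the linear GEMM-M offset once with fast_divmod and folds the h/w bounds checks
// into per-filter-position bitmasks, which also explains why can_implement() rejects R or S
// larger than 32 (one mask bit per filter position). The standalone host sketch below is an
// illustrative assumption, not CUTLASS code: the problem-size values are arbitrary and plain
// '/' and '%' stand in for params.pq_divmod / params.q_divmod.
#include <cstdint>
#include <cstdio>

struct Problem2d {
  int N, H, W;                        // input batch and spatial extents
  int P, Q;                           // output spatial extents
  int R, S;                           // filter extents (R, S <= 32 for 32-bit masks)
  int stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w;
};

int main() {
  Problem2d ps{2, 14, 14, 14, 14, 3, 3, 1, 1, 1, 1, 1, 1};

  // Decompose a linear output offset into (n, p, q).
  int offset_npq = 250;
  int n = offset_npq / (ps.P * ps.Q);
  int residual = offset_npq % (ps.P * ps.Q);
  int p = residual / ps.Q;
  int q = residual % ps.Q;

  // Bit r of mask_h: input row h is in bounds for filter row r.
  // Bit s of mask_w: input column w is in bounds for filter column s.
  uint32_t mask_h = 0, mask_w = 0;
  for (int r = 0; r < ps.R; ++r) {
    int h = p * ps.stride_h - ps.pad_h + r * ps.dilation_h;
    bool pred = (n < ps.N && h >= 0 && h < ps.H);
    mask_h |= (uint32_t(pred) << r);
  }
  for (int s = 0; s < ps.S; ++s) {
    int w = q * ps.stride_w - ps.pad_w + s * ps.dilation_w;
    bool pred = (w >= 0 && w < ps.W);
    mask_w |= (uint32_t(pred) << s);
  }

  // valid() for filter position (r, s) then reduces to two bit tests.
  for (int r = 0; r < ps.R; ++r) {
    for (int s = 0; s < ps.S; ++s) {
      bool valid = ((mask_h >> r) & 1) && ((mask_w >> s) & 1);
      std::printf("(n=%d, p=%d, q=%d) r=%d s=%d valid=%d\n", n, p, q, r, s, valid);
    }
  }
  return 0;
}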
cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h", "repo_id": "cutlass", "token_count": 5193 }
23
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile) matrix from memory. This iterator assumes TensorNDHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/conv/threadblock/conv3d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename Layout_, typename ThreadMap_ > class Conv3dFpropActivationTileAccessIteratorOptimized { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = Layout_; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; using TensorRef = cutlass::TensorRef<Element, Layout>; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 3; using ConvProblemSize = typename conv::Conv3dProblemSize; static int const kAccessesPerVector = 1; using Mask = uint64_t; // // Simplifying assertions // static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1"); // // Parameters structure // using Params = Conv3dFpropActivationIteratorOptimizedParams<Layout>; private: Conv3dFpropActivationIteratorOptimizedParams<Layout> const &params_; Conv3dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; // One pointer per access char const *pointer_[ThreadMap::Iterations::kStrided]; // current filter position (t, r, s) int filter_t_; int filter_r_; int filter_s_; int filter_c_; // mask for t, r, and s Index masks_[ThreadMap::Iterations::kStrided][3]; public: CUTLASS_HOST_DEVICE Conv3dFpropActivationTileAccessIteratorOptimized( Conv3dFpropActivationIteratorOptimizedParams<Layout> const &params, Conv3dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles ) : params_(params), problem_size_(problem_size), filter_t_(0), filter_r_(0), filter_s_(0), filter_c_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_c_ = threadblock_offset.column() + thread_coord.contiguous(); int offset_n[ThreadMap::Iterations::kStrided]; int offset_z[ThreadMap::Iterations::kStrided]; int offset_p[ThreadMap::Iterations::kStrided]; int offset_q[ThreadMap::Iterations::kStrided]; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { pointer_[s] = reinterpret_cast<char const *>(ptr); int offset_nzpq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; // The subseqnet fast_divmod() operations are equivalent to the following logical computation: // // // offset_n[s] = offset_nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q); // int residual = offset_nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q); // // offset_z[s] = residual 
/ (problem_size_.P * problem_size_.Q); // residual = residual % (problem_size_.P * problem_size_.Q); // // offset_p[s] = residual / problem_size_.Q; // offset_q[s] = residual % problem_size_.Q; // int residual; // input: (nzpq offset) output: (n offset and resudial (zpq offset)) params.zpq_divmod(offset_n[s], residual, offset_nzpq); // input: (zpq offset) output: (z offset and resudial (pq)) params.pq_divmod(offset_z[s], residual, residual); // input: (pq offset) output: (p offset and resudial (q offset)) params.q_divmod(offset_p[s], offset_q[s], residual); TensorCoord coord = at_(offset_n[s], offset_z[s], offset_p[s], offset_q[s], 0, 0, 0); pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8; } clear_mask(); // mask predicates for filter position T CUTLASS_PRAGMA_NO_UNROLL for (int t = 0; t < problem_size_.T; ++t) { CUTLASS_PRAGMA_UNROLL for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) { int t_ = t; if (problem_size_.mode == Mode::kConvolution) { t_ = problem_size_.T - 1 - t; } int d = offset_z[s_idx] * problem_size_.stride_d - problem_size_.pad_d + t_ * problem_size_.dilation_d; bool pred = (offset_n[s_idx] < problem_size_.N && d >= 0 && d < problem_size_.D); masks_[s_idx][0] |= (pred << t); } } // mask predicates for filter position R CUTLASS_PRAGMA_NO_UNROLL for (int r = 0; r < problem_size_.R; ++r) { CUTLASS_PRAGMA_UNROLL for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) { int r_ = r; if (problem_size_.mode == Mode::kConvolution) { r_ = problem_size_.R - 1 - r; } int h = offset_p[s_idx] * problem_size_.stride_h - problem_size_.pad_h + r_ * problem_size_.dilation_h; bool pred = (h >= 0 && h < problem_size_.H); masks_[s_idx][1] |= (pred << r); } } // mask predicates for filter position S CUTLASS_PRAGMA_NO_UNROLL for (int s = 0; s < problem_size_.S; ++s) { CUTLASS_PRAGMA_UNROLL for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) { int s_ = s; if (problem_size_.mode == Mode::kConvolution) { s_ = problem_size_.S - 1 - s; } int w = offset_q[s_idx] * problem_size_.stride_w - problem_size_.pad_w + s_ * problem_size_.dilation_w; bool pred = (w >= 0 && w < problem_size_.W); masks_[s_idx][2] |= (pred << s); } } if (filter_c_ >= problem_size.C) { clear_mask(); } set_iteration_index(0); } CUTLASS_HOST_DEVICE static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) { return Params(problem_size, layout, sizeof_bits<Element>::value, {Shape::kRow, Shape::kColumn}, ThreadMap::kThreads, ThreadMap::kElementsPerAccess, {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}); } private: /// Returns the coordinate in the activations tensor X that is correspoinding to // output nzpq and filter position t, r, s CUTLASS_HOST_DEVICE TensorCoord at_(int n, int z, int p, int q, int t, int r, int s) const { if (problem_size_.mode == Mode::kConvolution) { t = problem_size_.T - 1 - t; r = problem_size_.R - 1 - r; s = problem_size_.S - 1 - s; } int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d; int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h; int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w; return TensorCoord(n, d, h, w, filter_c_); } /// Adds a pointer offset in units of element CUTLASS_HOST_DEVICE void add_byte_offset_(LongIndex byte_offset) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; 
++s) { pointer_[s] += byte_offset; } } /// Clears the predicates CUTLASS_HOST_DEVICE void clear_mask_(bool clear) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { // We are using inline PTX assembly here to avoid an CUDA C++ compilation // artifact in which control flow instructions are generated. Instead, our // intent is to predicate the mov instructions. #if defined(__CUDA_ARCH__) asm volatile( "{\n" " .reg .pred p;\n" " .reg .u32 m;" " mov.u32 m, %2;" " setp.ne.b32 p, %1, 0;\n" " @p mov.u32 m, 0;\n" " mov.u32 %0, m;\n" "}\n" : "=r"(masks_[s][0]) : "r"((int)clear), "r"(masks_[s][0]) ); asm volatile( "{\n" " .reg .pred p;\n" " .reg .u32 m;" " mov.u32 m, %2;" " setp.ne.b32 p, %1, 0;\n" " @p mov.u32 m, 0;\n" " mov.u32 %0, m;\n" "}\n" : "=r"(masks_[s][1]) : "r"((int)clear), "r"(masks_[s][1]) ); asm volatile( "{\n" " .reg .pred p;\n" " .reg .u32 m;" " mov.u32 m, %2;" " setp.ne.b32 p, %1, 0;\n" " @p mov.u32 m, 0;\n" " mov.u32 %0, m;\n" "}\n" : "=r"(masks_[s][2]) : "r"((int)clear), "r"(masks_[s][2]) ); #else if (clear) { masks_[s][0] = 0; masks_[s][1] = 0; masks_[s][2] = 0; } #endif } } public: /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8); } CUTLASS_HOST_DEVICE void advance() { int next_idx = 0; // moves to the next tile ++filter_s_; if (filter_s_ == problem_size_.S) { filter_s_ = 0; ++filter_r_; next_idx = 1; if (filter_r_ == problem_size_.R) { filter_r_ = 0; ++filter_t_; if (filter_t_ < problem_size_.T) { next_idx = 2; } else { filter_t_ = 0; next_idx = 3; } } } add_byte_offset_(params_.inc_next[next_idx]); if (next_idx == 3) { filter_c_ += params_.filter_c_delta; } clear_mask_(filter_c_ >= problem_size_.C); } /// Clears the predicates CUTLASS_HOST_DEVICE void clear_mask() { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { masks_[s][0] = Mask(0); masks_[s][1] = Mask(0); masks_[s][2] = Mask(0); } } CUTLASS_HOST_DEVICE bool valid() { return (masks_[iteration_strided_][0] & (Index(1) << filter_t_)) && (masks_[iteration_strided_][1] & (Index(1) << filter_r_)) && (masks_[iteration_strided_][2] & (Index(1) << filter_s_)); } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv3dFpropActivationTileAccessIteratorOptimized &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(Conv3dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.C % (128/sizeof_bits<Element>::value)) { return Status::kErrorInvalidProblem; } // Conv3dFpropActivationTileAccessIteratorOptimized has constraint on filter positions // due to the number of mask bits. 
if (problem_size.T > 32 || problem_size.R > 32 || problem_size.S > 32) { return Status::kErrorNotSupported; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
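// advance() above never recomputes addresses from (t, r, s, c): it walks the filter with s
// fastest, then r, then t, then the channel block, and adds one of four precomputed byte
// increments (params_.inc_next[0..3]). The sketch below is an assumed standalone example; the
// increment values are arbitrary placeholders rather than the real precomputed strides, and it
// only mirrors the case-selection logic on the host.
#include <cstdio>

struct FilterWalk3d {
  int T, R, S;                            // filter extents
  int t = 0, r = 0, s = 0;                // current filter position
  long inc_next[4] = {1, 10, 100, 1000};  // placeholder increments per case
  long pointer = 0;                       // stand-in for the byte pointer

  int advance() {
    int next_idx = 0;                     // case 0: only s moved
    if (++s == S) {
      s = 0;
      ++r;
      next_idx = 1;                       // case 1: s wrapped, r moved
      if (r == R) {
        r = 0;
        ++t;
        if (t < T) {
          next_idx = 2;                   // case 2: r wrapped, t moved
        } else {
          t = 0;
          next_idx = 3;                   // case 3: filter done, next channel block
        }
      }
    }
    pointer += inc_next[next_idx];
    return next_idx;
  }
};

int main() {
  FilterWalk3d walk{2, 2, 2};
  for (int step = 0; step < 8; ++step) {
    int idx = walk.advance();
    std::printf("step %d -> (t=%d, r=%d, s=%d) used inc_next[%d]\n",
                step, walk.t, walk.r, walk.s, idx);
  }
  return 0;
}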
cutlass/include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h", "repo_id": "cutlass", "token_count": 6154 }
24
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel with a fused scale+bias+relu activation. The original implicit GEMM stores out-of-bound data as zeroes in shared memory because zeroes fed into the tensor cores produce zeroes out of the tensor cores, so the result remains the same. When fusing scale+bias+relu into the mainloop, this no longer holds because 0 x scale + bias = bias, which is not always 0. So, instead of storing zeroes, this fused kernel stores the out-of-bound data as a special NaN (0x7eff); when applying scale+bias+relu, the code behaves as if (data == 0x7eff) data = 0; else data = relu(data * scale + bias); See include/cutlass/conv/warp/scale_bias_relu_transformation.h for the elementwise computation. See include/cutlass/arch/memory_sm80.h for the NaN fill. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/cache_operation.h" #include "cutlass/gemm/warp/scale_bias_tile_iterator.h" #include "cutlass/conv/warp/scale_bias_relu_transform.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions.
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Element type of scale and bias vectors typename ElementScaleBias_, /// Layout of scale and bias vectors typename LayoutScaleBias_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// WarpIterator to load Scale or Bias vector from the shared memory typename WarpIteratorScaleBias_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class MmaFpropFusionBase { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Element type of scale and bias vectors using ElementScaleBias = ElementScaleBias_; /// Layout of scale and bias vectors using LayoutScaleBias = LayoutScaleBias_; ///< Policy describing tuning details using Policy = Policy_; ///< WarpIterator to load Scale or Bias vector from the shared memory using WarpIteratorScaleBias = WarpIteratorScaleBias_; // // Dependent types // /// Warp-level Mma using Operator = typename Policy::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. using WarpGemm = typename Policy::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>; /// Number of warp-level GEMM oeprations static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); /// Number of stages static int const kStages = Stages; /// Tensor reference to the A operand using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>; /// Tensor reference to the scale and bias vectors using TensorRefScaleBias = TensorRef<ElementScaleBias, LayoutScaleBias>; /// Tensor reference to the B operand using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>; static_assert(kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); static_assert((kWarpGemmIterations % 2) == 0, "Inner loop iteration must be an even number."); // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM class SharedStorage { public: // // Type definitions // /// Shape of the A matrix operand in shared memory using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow, Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the A scale and bias vectors in shared memory using ShapeScaleBias = MatrixShape<1 + Policy::SmemPaddingA::kRow, 2 * Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; public: // // Data members // /// Buffer for A operand AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A; /// Buffer for B operand AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B; /// Buffer for A operand Scale and Bias AlignedBuffer<ElementScaleBias, ShapeScaleBias::kCount> operand_A_scale_bias; public: // // Methods // /// Returns a layout object for the A matrix CUTLASS_DEVICE static typename Operator::LayoutA LayoutA() { return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); } /// Returns a layout object for the B matrix CUTLASS_HOST_DEVICE static typename Operator::LayoutB LayoutB() { return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); } /// Returns a layout object 
for the A scale and bias vectors CUTLASS_DEVICE static LayoutScaleBias LayoutScaleBias() { return LayoutScaleBias::packed( {ShapeScaleBias::kRow, ShapeScaleBias::kColumn}); } /// Returns a TensorRef to the A operand CUTLASS_HOST_DEVICE TensorRefA operand_A_ref() { return TensorRefA{operand_A.data(), LayoutA()}; } /// Returns a TensorRef to the B operand CUTLASS_HOST_DEVICE TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; } /// Returns a TensorRef to the A operand Scale vector CUTLASS_HOST_DEVICE TensorRefScaleBias operand_A_scale_bias_ref() { return TensorRefScaleBias{operand_A_scale_bias.data(), LayoutScaleBias()}; } }; protected: // // Data members // /// Iterator to load a warp-scoped tile of A operand from shared memory typename Operator::IteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of A operand scale and bias vector /// from shared memory WarpIteratorScaleBias warp_tile_iterator_A_scale_bias_; /// Iterator to load a warp-scoped tile of B operand from shared memory typename Operator::IteratorB warp_tile_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE MmaFpropFusionBase( ///< Shared storage needed for internal use by threadblock-scoped GEMM SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), warp_tile_iterator_A_scale_bias_( shared_storage.operand_A_scale_bias_ref(), lane_idx), warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Iterates over vectors of scale and bias vector in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorScaleBias_, /// Iterates over vectors of scale and bias vector in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorScaleBias_, /// Cache operation for scale/bias operand cutlass::arch::CacheOperation::Kind CacheOpScaleBias, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// WarpIterator to load Scale or Bias vector from the shared memory typename WarpIteratorScaleBias_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class ImplicitGemmFpropFusionMultistage : public MmaFpropFusionBase<Shape_, typename IteratorScaleBias_::Element, typename IteratorScaleBias_::Layout, Policy_, WarpIteratorScaleBias_, Stages> { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Iterates over tiles of the scale and bias vectors in global memory using IteratorScaleBias = IteratorScaleBias_; ///< WarpIterator to load Scale or Bias vector from the shared memory using WarpIteratorScaleBias = WarpIteratorScaleBias_; ///< Policy describing tuning details using Policy = Policy_; ///< Base class using Base = MmaFpropFusionBase<Shape_, typename IteratorScaleBias::Element, typename IteratorScaleBias::Layout, Policy, WarpIteratorScaleBias, Stages>; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; using SmemIteratorScaleBias = SmemIteratorScaleBias_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; static cutlass::arch::CacheOperation::Kind const kCacheOpScaleBias = CacheOpScaleBias; // // Dependent types // /// Fragment of accumulator tile using ElementC = typename Policy::Operator::ElementC; using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Internal structure exposed for introspection. 
struct Detail { static_assert(Base::kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpLoadedFragmentScaleBias = typename WarpIteratorScaleBias::Fragment; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of A operand scale vector to shared memory SmemIteratorScaleBias smem_iterator_A_scale_bias_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE ImplicitGemmFpropFusionMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_A_scale_bias_(shared_storage.operand_A_scale_bias_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_A_scale_bias_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorScaleBias &iterator_A_scale_bias, IteratorB &iterator_B, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < 
Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / 8; // Uses nan fill for out of bound data cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpA>( dst_ptr, iterator_A.get(), iterator_A.valid()); ++iterator_A; ++this->smem_iterator_A_; } } // Async Copy for operand A scale and bias vector. Scale and bias vectors // are small. One iteration is enough. if (group_start_A == 0) { typename IteratorScaleBias::AccessType *dst_ptr = reinterpret_cast<typename IteratorScaleBias::AccessType *>( this->smem_iterator_A_scale_bias_.get()); int const kSrcBytes = sizeof_bits<typename IteratorScaleBias::Element>::value * IteratorScaleBias::kElementsPerAccess / 8; cutlass::arch::cp_async<kSrcBytes, kCacheOpScaleBias>( dst_ptr, iterator_A_scale_bias.get(), iterator_A_scale_bias.valid()); } iterator_B.set_iteration_index(group_start_B); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr, iterator_B.get(), iterator_B.valid()); ++iterator_B; ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< iterator over scale and bias vectors in global memory IteratorScaleBias iterator_A_scale_bias, ///< initial value of accumulator FragmentC const &src_accum, ///< number of iterations per channel int gemm_k_iterations_per_channel = 0, ///< Imaginary strides used for planar-complex only - ignored here int64_t imag_stride_A = 0, int64_t imag_stride_B = 0) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / 8; // Uses Nan fill for out of bound data cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpA>( dst_ptr, iterator_A.get(), iterator_A.valid()); ++iterator_A; ++this->smem_iterator_A_; } // Async Copy for operand A scale and bias vectors. Scale and bias // vectors are small. One iteration is enough. 
{ typename IteratorScaleBias::AccessType *dst_ptr = reinterpret_cast<typename IteratorScaleBias::AccessType *>( this->smem_iterator_A_scale_bias_.get()); int const kSrcBytes = sizeof_bits<typename IteratorScaleBias::Element>::value * IteratorScaleBias::kElementsPerAccess / 8; cutlass::arch::cp_async<kSrcBytes, kCacheOpScaleBias>( dst_ptr, iterator_A_scale_bias.get(), iterator_A_scale_bias.valid()); } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr, iterator_B.get(), iterator_B.valid()); ++iterator_B; ++this->smem_iterator_B_; } // Move to the next stage iterator_A.advance(); iterator_A_scale_bias.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_A_scale_bias_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpLoadedFragmentScaleBias warp_loaded_frag_A_scale_bias[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; cutlass::conv::warp::FpropScaleBiasReluTransform<WarpTransformedFragmentA, WarpLoadedFragmentScaleBias> elementwise_transform; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_A_scale_bias_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_A_scale_bias_.load( warp_loaded_frag_A_scale_bias[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_A_scale_bias_; ++this->warp_tile_iterator_B_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance(iterator_A, iterator_A_scale_bias, iterator_B); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); elementwise_transform(warp_transformed_frag_A[0], warp_loaded_frag_A_scale_bias[0]); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_scale_bias_.set_kgroup_index( (warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_A_scale_bias_.load( warp_loaded_frag_A_scale_bias[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_A_scale_bias_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) { warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % 2]); elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2], warp_loaded_frag_A_scale_bias[warp_mma_k % 2]); } warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], accum ); // Issue global->shared copies for the next stage int group_start_iteration_A, group_start_iteration_B; if (warp_mma_k + 1 == Base::kWarpGemmIterations) { group_start_iteration_A = 0; group_start_iteration_B = 0; } else { group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; } copy_tiles_and_advance(iterator_A, iterator_A_scale_bias, iterator_B, group_start_iteration_A, group_start_iteration_B); if (warp_mma_k + 1 == Base::kWarpGemmIterations) { warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); elementwise_transform( warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_A_scale_bias[(warp_mma_k + 1) % 2]); } if (warp_mma_k + 2 == Base::kWarpGemmIterations) { // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.advance(); iterator_A_scale_bias.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_A_scale_bias_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_A_scale_bias_.add_tile_offset( {0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_A_scale_bias_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; } } } // Insert fence and wait for all outstanding cp.async operations to commit. 
cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
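// As the file-level comment explains, cp_async_nan fills out-of-bound activations with the
// sentinel NaN bit pattern 0x7eff instead of zero, and FpropScaleBiasReluTransform must map
// that sentinel back to zero before applying the fused scale+bias+relu. The sketch below is an
// assumed host-side illustration, not the warp-level transform: the bit decoding is simplified
// to a small integer payload purely for demonstration, whereas the real code operates on
// half-precision fragments where 0x7eff is a NaN, so no valid activation collides with it.
#include <cstdint>
#include <cstdio>

constexpr uint16_t kOobSentinel = 0x7eff;  // NaN pattern used for OOB fills

// One element of the fused mainloop transform: sentinel -> 0, otherwise relu(scale * x + bias).
float scale_bias_relu(uint16_t raw_bits, float scale, float bias) {
  if (raw_bits == kOobSentinel) {
    return 0.0f;                           // OOB element must contribute zero
  }
  float x = static_cast<float>(raw_bits);  // simplified decode for the sketch
  float y = scale * x + bias;
  return y > 0.0f ? y : 0.0f;              // ReLU
}

int main() {
  std::printf("in-bounds:     %f\n", scale_bias_relu(3, 2.0f, 1.0f));            // 2*3+1 = 7
  std::printf("out-of-bounds: %f\n", scale_bias_relu(kOobSentinel, 2.0f, 1.0f)); // -> 0
  return 0;
}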
cutlass/include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h", "repo_id": "cutlass", "token_count": 12230 }
25
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Helper macros for the CUTLASS library */ #pragma once //////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef CUTLASS_NAMESPACE #define concat_tok(a, b) a ## b #define mkcutlassnamespace(pre, ns) concat_tok(pre, ns) #define cutlass mkcutlassnamespace(cutlass_, CUTLASS_NAMESPACE) #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) #define CUTLASS_HOST_DEVICE __forceinline__ __device__ __host__ #define CUTLASS_DEVICE __forceinline__ __device__ #elif defined(__CUDACC_RTC__) #define CUTLASS_HOST_DEVICE __forceinline__ __device__ #define CUTLASS_DEVICE __forceinline__ __device__ #else #define CUTLASS_HOST_DEVICE inline #define CUTLASS_DEVICE inline #endif #define CUTLASS_HOST __host__ #define CUTLASS_GLOBAL __global__ static //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> CUTLASS_HOST_DEVICE void __CUTLASS_UNUSED(T const &) { } #if defined(__GNUC__) #define CUTLASS_UNUSED(expr) __CUTLASS_UNUSED(expr) #else #define CUTLASS_UNUSED(expr) do { ; } while (&expr != &expr) #endif #ifdef _MSC_VER // Provides support for alternative operators 'and', 'or', and 'not' #include <iso646.h> #endif // _MSC_VER #if !defined(__CUDACC_RTC__) #include <assert.h> #endif #if defined(__CUDA_ARCH__) #if defined(_MSC_VER) #define CUTLASS_NOT_IMPLEMENTED() { printf("%s not implemented\n", __FUNCSIG__); asm volatile ("brkpt;\n"); } #else #define CUTLASS_NOT_IMPLEMENTED() { printf("%s not implemented\n", __PRETTY_FUNCTION__); asm volatile ("brkpt;\n"); } #endif #else #if defined(_MSC_VER) #define CUTLASS_NOT_IMPLEMENTED() assert(0 && __FUNCSIG__) #else #define CUTLASS_NOT_IMPLEMENTED() assert(0 && __PRETTY_FUNCTION__) #endif #endif //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { #ifndef CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED #define CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED 0 #endif // CUDA 10.1 introduces the mma instruction #if !defined(CUTLASS_ENABLE_TENSOR_CORE_MMA) #define CUTLASS_ENABLE_TENSOR_CORE_MMA 0 #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #define CUTLASS_ASSERT(x) assert(x) //////////////////////////////////////////////////////////////////////////////////////////////////// // CUTLASS_PRAGMA_(UNROLL|NO_UNROLL) optimization directives for the CUDA compiler. 
#if defined(__CUDA_ARCH__) && !defined(__INTELLISENSE__) #if defined(__CUDACC_RTC__) || (defined(__clang__) && defined(__CUDA__)) #define CUTLASS_PRAGMA_UNROLL _Pragma("unroll") #define CUTLASS_PRAGMA_NO_UNROLL _Pragma("unroll 1") #else #define CUTLASS_PRAGMA_UNROLL #pragma unroll #define CUTLASS_PRAGMA_NO_UNROLL #pragma unroll 1 #endif #define CUTLASS_GEMM_LOOP CUTLASS_PRAGMA_NO_UNROLL #else #define CUTLASS_PRAGMA_UNROLL #define CUTLASS_PRAGMA_NO_UNROLL #define CUTLASS_GEMM_LOOP #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if !defined(__CUDACC_RTC__) #define CUTLASS_THREAD_LOCAL thread_local #else #define CUTLASS_THREAD_LOCAL #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(_MSVC_LANG) # define CUTLASS_CPLUSPLUS _MSVC_LANG #else # define CUTLASS_CPLUSPLUS __cplusplus #endif #if (201700L <= CUTLASS_CPLUSPLUS) #define CUTLASS_CONSTEXPR_IF_CXX17 constexpr #define CUTLASS_CXX17_OR_LATER 1 #else #define CUTLASS_CONSTEXPR_IF_CXX17 #define CUTLASS_CXX17_OR_LATER 0 #endif //////////////////////////////////////////////////////////////////////////////////////////////////// }; // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
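// A small usage sketch (assumed example, not part of the library) of the portability macros
// defined above: the same function body is emitted as __forceinline__ __host__ __device__ under
// NVCC or Clang CUDA and as a plain inline function for a host-only compiler, while
// CUTLASS_PRAGMA_UNROLL expands to an unroll pragma only for device-side compilation.
#include "cutlass/detail/helper_macros.hpp"

template <int N>
CUTLASS_HOST_DEVICE
float dot(float const (&a)[N], float const (&b)[N]) {
  float sum = 0.0f;
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < N; ++i) {
    sum += a[i] * b[i];
  }
  return sum;
}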
cutlass/include/cutlass/detail/helper_macros.hpp/0
{ "file_path": "cutlass/include/cutlass/detail/helper_macros.hpp", "repo_id": "cutlass", "token_count": 1854 }
26
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cutlass/numeric_conversion.h> ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::fusion { ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // // Fusion Operations // Template args must not be implementation dependent // ///////////////////////////////////////////////////////////////////////////////////////////////// struct FusionOperation { // metadata types/queries that can be overrided using ElementOutput = void; using ElementCompute = void; using ElementSource = void; static constexpr bool IsSourceSupported = false; using ElementScalar = void; static constexpr int AlignmentScalar = 0; static constexpr bool IsScaleFactorSupported = false; static constexpr bool IsPerRowScaleSupported = false; using ElementBias = void; static constexpr int AlignmentBias = 0; static constexpr bool IsPerRowBiasSupported = false; static constexpr bool IsDePerRowBiasSupported = false; using ActivationFn = void; static constexpr bool IsEltActSupported = false; static constexpr bool IsDeEltActSupported = false; using ElementAux = void; using GmemLayoutTagAux = void; static constexpr int AlignmentAux = 0; static constexpr bool IsAuxOutSupported = false; static constexpr bool IsAuxInSupported = false; using ElementAmax = void; static constexpr bool IsAbsMaxSupported = false; }; // D = alpha * acc template< class ElementOutput_, class ElementCompute_, class ElementScalar_ = ElementCompute_, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct ScaledAcc : FusionOperation { using 
ElementOutput = ElementOutput_; using ElementCompute = ElementCompute_; using ElementScalar = ElementScalar_; static constexpr int AlignmentScalar = 1; static constexpr auto RoundStyle = RoundStyle_; }; // D = alpha * acc + beta * C template< class ElementOutput_, class ElementCompute_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct LinearCombination : ScaledAcc<ElementOutput_, ElementCompute_, ElementScalar_, RoundStyle_> { using ElementSource = ElementSource_; static constexpr bool IsSourceSupported = true; }; // D = activation(alpha * acc + beta * C) template< template <class> class ActivationFn_, class ElementOutput_, class ElementCompute_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct LinCombEltAct : LinearCombination<ElementOutput_, ElementCompute_, ElementSource_, ElementScalar_, RoundStyle_> { using ActivationFn = ActivationFn_<ElementCompute_>; static constexpr bool IsEltActSupported = true; }; // D = alpha * acc + beta * C + per-row bias template< class ElementOutput_, class ElementCompute_, class ElementBias_ = ElementOutput_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct LinCombPerRowBias : LinearCombination<ElementOutput_, ElementCompute_, ElementSource_, ElementScalar_, RoundStyle_> { using ElementBias = ElementBias_; static constexpr int AlignmentBias = AlignmentBias_; static constexpr bool IsPerRowBiasSupported = true; }; // D = activation(alpha * acc + beta * C + per-row bias) template< template <class> class ActivationFn_, class ElementOutput_, class ElementCompute_, class ElementBias_ = ElementOutput_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct LinCombPerRowBiasEltAct : LinCombPerRowBias<ElementOutput_, ElementCompute_, ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> { using ActivationFn = ActivationFn_<ElementCompute_>; static constexpr bool IsEltActSupported = true; }; // D = activation(alpha * acc + beta * C + per-row bias) // aux = alpha * acc + beta * C + per-row bias template< class GmemLayoutTagAux_, template <class> class ActivationFn_, class ElementOutput_, class ElementCompute_, class ElementAux_ = ElementOutput_, class ElementBias_ = ElementOutput_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, int AlignmentAux_ = 128 / sizeof_bits_v<ElementAux_>, int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct LinCombPerRowBiasEltActAux : LinCombPerRowBiasEltAct<ActivationFn_, ElementOutput_, ElementCompute_, ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> { using ElementAux = ElementAux_; using GmemLayoutTagAux = GmemLayoutTagAux_; static constexpr int AlignmentAux = AlignmentAux_; static constexpr bool IsAuxOutSupported = true; }; // D = activation(per-row alpha * acc + per-row beta * C + per-row bias) template< template <class> class ActivationFn_, class ElementOutput_, class ElementCompute_, class ElementBias_ = ElementOutput_, class ElementSource_ = ElementOutput_, class 
ElementScalar_ = ElementCompute_, // per-row alpha/beta int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>, int AlignmentScalar_ = 128 / sizeof_bits_v<ElementScalar_>, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct PerRowLinCombPerRowBiasEltAct : LinCombPerRowBiasEltAct<ActivationFn_, ElementOutput_, ElementCompute_, ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> { static constexpr int AlignmentScalar = AlignmentScalar_; static constexpr bool IsPerRowScaleSupported = true; }; // Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias // if D is fp8 // D = scale_d * activation(Z) // else // D = activation(Z) template< template <class> class ActivationFn_, class ElementOutput_, class ElementCompute_, class ElementBias_ = ElementOutput_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct ScaledLinCombPerRowBiasEltAct : LinCombPerRowBiasEltAct<ActivationFn_, ElementOutput_, ElementCompute_, ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> { static constexpr bool IsScaleFactorSupported = true; }; // Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias // if D is fp8 // amax_d = max(abs(elements in activation(Z))) // D = scale_d * activation(Z) // else // D = activation(Z) // if Aux is fp8 // amax_aux = max(abs(elements in Z)) // Aux = scale_aux * Z // else // Aux = Z template< class GmemLayoutTagAux_, template <class> class ActivationFn_, class ElementOutput_, class ElementCompute_, class ElementAux_ = ElementOutput_, class ElementAmax_ = ElementCompute_, class ElementBias_ = ElementOutput_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, int AlignmentAux_ = 128 / sizeof_bits_v<ElementAux_>, int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct ScaledLinCombPerRowBiasEltActAmaxAux : ScaledLinCombPerRowBiasEltAct<ActivationFn_, ElementOutput_, ElementCompute_, ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> { using ElementAmax = ElementAmax_; static constexpr bool IsAbsMaxSupported = true; using ElementAux = ElementAux_; using GmemLayoutTagAux = GmemLayoutTagAux_; static constexpr int AlignmentAux = AlignmentAux_; static constexpr bool IsAuxOutSupported = true; }; // Z = Aux // dY = alpha * acc + beta * C // D = d_activation(dY, Z) template< class GmemLayoutTagAux_, template <class> class ActivationFn_, class ElementOutput_, class ElementCompute_, class ElementAux_ = ElementOutput_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, int AlignmentAux_ = 128 / sizeof_bits_v<ElementAux_>, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct LinCombDeEltAct : LinearCombination<ElementOutput_, ElementCompute_, ElementSource_, ElementScalar_, RoundStyle_> { using ActivationFn = ActivationFn_<ElementCompute_>; static constexpr bool IsDeEltActSupported = true; using ElementAux = ElementAux_; using GmemLayoutTagAux = GmemLayoutTagAux_; static constexpr int AlignmentAux = AlignmentAux_; static constexpr bool IsAuxInSupported = true; }; // Z = Aux // dY = alpha * acc + beta * C // D = d_activation(dY, Z) // dBias = sum of columns of D template< class GmemLayoutTagAux_, template <class> class ActivationFn_, class ElementOutput_, class ElementCompute_, class 
ElementAux_ = ElementOutput_, class ElementBias_ = ElementCompute_, class ElementSource_ = ElementOutput_, class ElementScalar_ = ElementCompute_, int AlignmentAux_ = 128 / sizeof_bits_v<ElementAux_>, int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>, FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest > struct LinCombDeEltActDePerRowBias : LinCombDeEltAct<GmemLayoutTagAux_, ActivationFn_, ElementOutput_, ElementCompute_, ElementAux_, ElementSource_, ElementScalar_, AlignmentAux_, RoundStyle_> { using ElementBias = ElementBias_; static constexpr int AlignmentBias = AlignmentBias_; static constexpr bool IsDePerRowBiasSupported = true; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::epilogue::fusion ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/fusion/operations.hpp/0
{ "file_path": "cutlass/include/cutlass/epilogue/fusion/operations.hpp", "repo_id": "cutlass", "token_count": 3682 }
27
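The fusion structs defined in operations.hpp above are pure compile-time descriptors: each one only records which terms a fused epilogue supports (source, per-row bias, activation, auxiliary tensor, scale factors), and collective epilogue builders inspect these traits to select an implementation. A minimal selection sketch follows; the ReLU functor, the half-precision output type, and the activation header path are illustrative assumptions rather than requirements of this file.

#include "cutlass/epilogue/fusion/operations.hpp"
#include "cutlass/epilogue/thread/activation.h"   // assumed home of cutlass::epilogue::thread::ReLu
#include "cutlass/numeric_types.h"

// Describes D = relu(alpha * acc + beta * C + per-row bias), computed in float, stored as half.
using FusionOp = cutlass::epilogue::fusion::LinCombPerRowBiasEltAct<
    cutlass::epilogue::thread::ReLu,   // ActivationFn_
    cutlass::half_t,                   // ElementOutput_
    float,                             // ElementCompute_
    float>;                            // ElementBias_ (remaining parameters keep their defaults)

// Nothing executes here; downstream builders only read the traits set by the descriptor.
static_assert(FusionOp::IsEltActSupported, "descriptor should enable the elementwise activation");
static_assert(FusionOp::IsPerRowBiasSupported, "descriptor should enable the per-row bias");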
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations used by epilogues. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/scale_type.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// template <class Activation, class = void> struct GenericActivationTraits { static constexpr bool IsArgumentsNeeded = false; struct Arguments {}; }; template <class Activation> struct GenericActivationTraits<Activation, decltype(typename Activation::Arguments(), void())> { static constexpr bool IsArgumentsNeeded = true; using Arguments = typename Activation::Arguments; }; template <typename T> struct LinearCombinationGenericParams { T alpha; ///< scales accumulators T beta; ///< scales source tensor T const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory T const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE LinearCombinationGenericParams(): alpha(T(1)), beta(T(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE LinearCombinationGenericParams( T alpha, T beta = T(0) ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE LinearCombinationGenericParams( T const *alpha_ptr, T const *beta_ptr = nullptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator followed by an activation function to an array of elements. 
/// /// D = activation(alpha * accumulator + beta * source + uniform) /// template < template<typename T> class ActivationFunctor, typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest, bool IsHeavy = false > class LinearCombinationGeneric { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static bool const kIsHeavy = IsHeavy; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentSource = Array<ElementOutput, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params : LinearCombinationGenericParams<ElementCompute>, GenericActivationTraits<ActivationFunctor<ElementCompute>>::Arguments { using LinearCombinationGenericParams<ElementCompute>::LinearCombinationGenericParams; }; private: // // Data members // Params params_; bool skip_elementwise_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationGeneric(Params const &params) { params_ = params; params_.alpha = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); params_.beta = (params.beta_ptr ? 
*params.beta_ptr : params.beta); skip_elementwise_ = false; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return params_.beta != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { params_.beta = ElementCompute(1); } if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to internal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; ActivationFunctor<FragmentCompute> activation; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(params_.alpha, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(params_.beta, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(params_.alpha, converted_accumulator, intermediate); // D = alpha * Accum + X } if constexpr (GenericActivationTraits<ActivationFunctor<ElementCompute>>::IsArgumentsNeeded) { intermediate = skip_elementwise_ ? intermediate : activation(intermediate, params_); } else { intermediate = skip_elementwise_ ? intermediate : activation(intermediate); } // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to internal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_accumulator; ActivationFunctor<FragmentCompute> activation; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_accumulator(params_.alpha, converted_accumulator); // D = alpha * Accum } if constexpr (GenericActivationTraits<ActivationFunctor<FragmentCompute>>::IsArgumentsNeeded) { intermediate = skip_elementwise_ ? intermediate : activation(intermediate, params_); } else { intermediate = skip_elementwise_ ?
intermediate : activation(intermediate); } // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
cutlass/include/cutlass/epilogue/thread/linear_combination_generic.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_generic.h", "repo_id": "cutlass", "token_count": 3155 }
28
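As a usage reference for the functor above, the sketch below shows a typical host-side parameterization; the ReLU activation, half-precision output, and the chosen vector width are illustrative assumptions, and in practice a kernel's default epilogue type usually instantiates this for you.

#include "cutlass/epilogue/thread/linear_combination_generic.h"
#include "cutlass/epilogue/thread/activation.h"   // assumed home of cutlass::epilogue::thread::ReLu
#include "cutlass/numeric_types.h"

using EpilogueOp = cutlass::epilogue::thread::LinearCombinationGeneric<
    cutlass::epilogue::thread::ReLu,                      // ActivationFunctor
    cutlass::half_t,                                      // ElementOutput_
    128 / cutlass::sizeof_bits<cutlass::half_t>::value,   // Count: one 128-bit access holds 8 halves
    float,                                                // ElementAccumulator_
    float>;                                               // ElementCompute_

// Scalars can be passed by value, as here, or by device pointer via the other Params constructor.
EpilogueOp::Params params(/*alpha=*/1.0f, /*beta=*/0.0f);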
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. The shared memory resource is time-sliced across warps. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/epilogue_base_streamk.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large (!IsEpilogueFunctorHeavy<OutputOp_>::value) > class Epilogue : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>, public EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_> { public: using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; using BaseStreamK = EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_>; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// Number of warps per block using WarpCount = typename Base::WarpCount; /// Number of threads per block static int const kBlockThreads = 32 * WarpCount::kCount; /// Per-thread accumulator tile type using AccumulatorTile = typename Base::AccumulatorTile; /// Numerical accumulation element type using ElementAccumulator = typename WarpMmaOperator::ElementC; /// Fragment type used by the accumulator tile's fragment iterator using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = 
OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Vector type used by the global output iterator using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Vector type used by the shared output iterator using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1, "One of these must be exactly 1."); public: /// Aspect for when epilogue source is not needed struct SourceAspectNotNeeded { /// Constructor CUTLASS_DEVICE SourceAspectNotNeeded() {} // No-op CUTLASS_DEVICE void load() { } /// Invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, typename SharedLoadIterator::Fragment const &aligned_accum_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i]); } } }; /// Aspect for when epilogue source is needed struct SourceAspectNeeded { OutputTileIterator source_iterator; typename OutputTileIterator::Fragment source_fragment; /// Invoke the output functor over each vector of output CUTLASS_DEVICE static void apply_output_operator( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, typename SharedLoadIterator::Fragment const &aligned_accum_fragment, typename OutputTileIterator::Fragment const &source_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); OutputAccessType const *source_frag_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]); } } /// Constructor CUTLASS_DEVICE SourceAspectNeeded(OutputTileIterator source_iterator) : 
source_iterator(source_iterator) { source_fragment.clear(); } // Load addend source fragment from global memory CUTLASS_DEVICE void load() { source_iterator.load(source_fragment); ++source_iterator; } /// Invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, typename SharedLoadIterator::Fragment const &aligned_accum_fragment) { apply_output_operator(output_fragment, output_op, aligned_accum_fragment, source_fragment); } }; private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Thread index in the threadblock int thread_idx; /// Warp index in the threadblock int warp_idx; public: /// Constructor CUTLASS_DEVICE Epilogue( typename Base::SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx) ///< Id of thread within warp : Base(shared_storage, thread_idx, warp_idx, lane_idx), BaseStreamK(thread_idx), shared_load_iterator_(shared_storage.reference(), thread_idx), thread_idx(thread_idx), warp_idx(warp_idx) {} /// Aggregates the accumulator sets shared by peer blocks in the global workspace, /// performing epilogue computations, writing to output CUTLASS_DEVICE void reduce( int peer_idx_begin, int peer_idx_end, int reduce_fragment_idx, void *element_workspace, OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination OutputTileIterator source_iterator) ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) { // Reduce peer accumulator fragments into one fragment AccumulatorFragment accum_fragment; BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace); // Store fragment to shared memory this->warp_tile_iterator_.store(accum_fragment); __syncthreads(); // Initialize/load source-fragment data typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); if (output_op.is_source_needed()) { source_iterator += reduce_fragment_idx; source_iterator.load(source_fragment); } // Load fragment from shared memory typename SharedLoadIterator::Fragment aligned_accum_fragment; shared_load_iterator_.load(aligned_accum_fragment); // Add fragments shared by other k partitions if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { typename SharedLoadIterator::Fragment aligned_addend_fragment; shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_addend_fragment); aligned_accum_fragment = add_fragments(aligned_accum_fragment, aligned_addend_fragment); } } // Compute the output result typename OutputTileIterator::Fragment output_fragment; // Apply the output operator SourceAspectNeeded::apply_output_operator( output_fragment, output_op, aligned_accum_fragment, source_fragment); // Store the final result destination_iterator += reduce_fragment_idx; destination_iterator.store(output_fragment); } /// Perform the epilogue computations and stream the result to global memory. 
CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators) ///< Complete warp-level accumulator tile { operator()(output_op, destination_iterator, accumulators, SourceAspectNotNeeded()); } /// Perform the epilogue computations and stream the result to global memory. Implements /// two alternative codepaths, depending on whether the output op requires addend data to be loaded. CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator ) ///< Tile iterator for addend source { if (output_op.is_source_needed()) { operator()(output_op, destination_iterator, accumulators, SourceAspectNeeded(source_iterator)); } else { operator()(output_op, destination_iterator, accumulators, SourceAspectNotNeeded()); } } /// Perform the epilogue computations and stream the result to global memory. Implements a /// single codepath, regardless of whether the output op requires addend data to be loaded CUTLASS_DEVICE void unified( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator ) ///< Tile iterator for addend source { if (!output_op.is_source_needed()) { source_iterator.clear_mask(); __syncthreads(); // Dummy (CUDA 11.0) } operator()(output_op, destination_iterator, accumulators, SourceAspectNeeded(source_iterator)); } template<class Seq> struct acc2smem; template <size_t... Seq> struct acc2smem<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; /// Streams the result to global memory template <typename SourceAspect> CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile SourceAspect source) { // Iterator over warp-level accumulator fragment AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? 
OutputTileIterator::kIterations : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // source.load(); // // Convert and store fragment // __syncthreads(); acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push( iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Compute the output result // typename OutputTileIterator::Fragment output_fragment; source.apply_output_operator(output_fragment, output_op, aligned_accum_fragment[0]); // // Store the final result // destination_iterator.store(output_fragment); ++destination_iterator; } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/epilogue.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue.h", "repo_id": "cutlass", "token_count": 6815 }
29
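At its call site inside a GEMM kernel, this epilogue is constructed over the shared-memory storage the mainloop has released and is handed the register-resident accumulator tile plus the global-memory iterators for C and D. The fragment below is only a schematic of that hand-off; shared_storage.epilogue, iterator_C, iterator_D, and output_op are placeholder names standing in for whatever the enclosing kernel defines.

// Schematic device-side call site (placeholder identifiers, not a complete kernel):
__syncthreads();   // ensure the mainloop is done with shared memory before the epilogue reuses it

Epilogue epilogue(shared_storage.epilogue, thread_idx, warp_idx, lane_idx);

// The four-argument overload dispatches on output_op.is_source_needed(),
// selecting SourceAspectNeeded or SourceAspectNotNeeded internally.
epilogue(output_op, iterator_D, accumulators, iterator_C);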
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Visitor tree operation base implementation to enable composable fusions for the CUTLASS 2x epilogue */ #pragma once #include "cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::threadblock { using namespace cute; using cute::tuple; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <class... Ops> struct VisitorImpl2x: fusion::detail::Sm90VisitorImplBase<Ops...> { using fusion::detail::Sm90VisitorImplBase<Ops...>::Sm90VisitorImplBase; using fusion::detail::Sm90VisitorImplBase<Ops...>::ops; template <class CallbacksTuple> struct Callbacks { // Callbacks can store non-persistent variables (e.g. tensors) or copies of persistent variables CallbacksTuple callbacks_tuple; /// Called at the start of the epilogue just before iterating over accumulator slices CUTLASS_DEVICE void begin_epilogue() { for_each(callbacks_tuple, [] (auto& callbacks) { callbacks.begin_epilogue(); } ); } /// Called at the start of one step before starting accumulator exchange CUTLASS_DEVICE void begin_step(int step_idx) { for_each(callbacks_tuple, [&] (auto& callbacks) { callbacks.begin_step(step_idx); } ); } /// Called at the start of a row CUTLASS_DEVICE void begin_row(int row_idx) { for_each(callbacks_tuple, [&] (auto& callbacks) { callbacks.begin_row(row_idx); } ); } /// Called after accumulators have been exchanged for each accumulator vector template <typename ElementAccumulator, typename... 
ElementInputs, int FragmentSize> CUTLASS_DEVICE auto // returns an Array visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc, Array<ElementInputs, FragmentSize> const&... frg_inputs) // depends on the N-naryness of the op = delete; // Must be implemented for each operation /// Called at the start of a row CUTLASS_DEVICE void end_row(int row_idx) { for_each(callbacks_tuple, [&] (auto& callbacks) { callbacks.end_row(row_idx); } ); } /// Called after all accumulator elements have been visited CUTLASS_DEVICE void end_step(int step_idx) { for_each(callbacks_tuple, [&] (auto& callbacks) { callbacks.end_step(step_idx); } ); } /// Called after all steps have been completed CUTLASS_DEVICE void end_epilogue() { for_each(callbacks_tuple, [] (auto& callbacks) { callbacks.end_epilogue(); } ); } }; // Callbacks factory // All operations must redefine this template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { return transform_apply(ops, [&] (auto& op) { return op.get_callbacks( threadblock_tile_offset, thread_idx, problem_shape); }, [] (auto&&... callbacks) { auto callbacks_tuple = cute::make_tuple(callbacks...); return Callbacks<decltype(callbacks_tuple)>{callbacks_tuple}; } ); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Convenience aliases using EmptyCallbacks = VisitorImpl2x<>::Callbacks<cute::tuple<>>; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace detail using namespace detail; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Tree visitor // ///////////////////////////////////////////////////////////////////////////////////////////////// template <class NodeOp, class... ChildOps> struct TreeVisitor2x : VisitorImpl2x<ChildOps..., NodeOp> { using VisitorImpl2x<ChildOps..., NodeOp>::VisitorImpl2x; template<class CallbacksImpl> struct Callbacks : CallbacksImpl { CUTLASS_DEVICE Callbacks(CallbacksImpl&& impl) : CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {} using CallbacksImpl::callbacks_tuple; template <typename ElementAccumulator, int FragmentSize> CUTLASS_DEVICE auto visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc) { constexpr int Rm1 = sizeof...(ChildOps); return cute::detail::tapply(callbacks_tuple, [&] (auto& child_callbacks) { return child_callbacks.visit(iter_idx, row_idx, column_idx, frg_idx, frg_acc); }, [&] (auto&&... frg_inputs) { return get<Rm1>(callbacks_tuple).visit(iter_idx, row_idx, column_idx, frg_idx, frg_acc, frg_inputs...); }, make_seq<Rm1>{} ); } }; // Callbacks factory template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { return Callbacks< decltype(VisitorImpl2x<ChildOps..., NodeOp>:: get_callbacks( threadblock_tile_offset, thread_idx, problem_shape ))>( VisitorImpl2x<ChildOps..., NodeOp>:: get_callbacks( threadblock_tile_offset, thread_idx, problem_shape ) ); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template< class ElementCompute, class EdgeTuple, class... 
Ops > struct TopologicalVisitor2x : VisitorImpl2x<Ops...> { static_assert(is_static_v<EdgeTuple>); static_assert(cute::rank(EdgeTuple{}) == sizeof...(Ops)); static_assert(sizeof...(Ops) > 1); using VisitorImpl2x<Ops...>::VisitorImpl2x; template<class CallbacksImpl> struct Callbacks : CallbacksImpl { CUTLASS_DEVICE Callbacks(CallbacksImpl&& impl) : CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {} using CallbacksImpl::callbacks_tuple; template <typename ElementAccumulator, int FragmentSize> CUTLASS_DEVICE auto visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc) { constexpr int Rm1 = sizeof...(Ops) - 1; auto frg_compute_tuple = cute::repeat<Rm1>(Array<ElementCompute, FragmentSize>{}); return cute::detail::tapply(EdgeTuple{}, callbacks_tuple, frg_compute_tuple, // Visit the first R-1 ops in topological order [&] (auto&& edge_seq, auto& callbacks, auto& frg_compute) { frg_compute = cute::detail::apply(frg_compute_tuple, // Compute the current op with children inputs [&] (auto const&... frg_inputs) { auto frg_output = callbacks.visit(iter_idx, row_idx, column_idx, frg_idx, frg_acc, frg_inputs...); using ElementOutput = typename decltype(frg_output)::Element; using ConvertOutput = NumericArrayConverter<ElementCompute, ElementOutput, FragmentSize>; ConvertOutput convert_output{}; return convert_output(frg_output); }, // Get inputs in the sequence given by the children indices of the current op edge_seq ); return frg_compute; }, // Visit the last op [&] (auto const&...ops) { return cute::detail::apply(frg_compute_tuple, // Compute the last op with children inputs [&] (auto const&... frg_inputs) { return get<Rm1>(callbacks_tuple).visit(iter_idx, row_idx, column_idx, frg_idx, frg_acc, frg_inputs...); }, // Get inputs in the sequence given by the children indices of the last op get<Rm1>(EdgeTuple{}) ); }, // Transform to visit R-1 ops, apply to visit last op make_seq<Rm1>{} ); } }; // Callbacks factory template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { return Callbacks<decltype( VisitorImpl2x<Ops...>:: get_callbacks( threadblock_tile_offset, thread_idx, problem_shape ))>( VisitorImpl2x<Ops...>:: get_callbacks( threadblock_tile_offset, thread_idx, problem_shape ) ); } }; template <class NodeOp, class... ChildOps> using Sm80EVT = TreeVisitor2x<NodeOp, ChildOps...>; template< class ElementCompute, class EdgeTuple, class... 
Ops > using Sm80TopologicalVisitor = TopologicalVisitor2x<ElementCompute, EdgeTuple, Ops...>; using X = Underscore; ///////////////////////////////////////////////////////////////////////////////////////////////// // OutputTileThreadLayout translate the CUTLASS 2.X OutputTileOptimalThreadMap into cute layout // used by CUTLASS 3.X Epilogue template < typename ThreadblockShape_, typename WarpShape_, typename Element_, int ElementsPerAccess, int Stages_=1 > struct OutputTileThreadLayout: DefaultThreadMapTensorOp< ThreadblockShape_, WarpShape_, ThreadblockShape_::kK/WarpShape_::kK, Element_, ElementsPerAccess>::Type { using Base = typename DefaultThreadMapTensorOp< ThreadblockShape_, WarpShape_, ThreadblockShape_::kK/WarpShape_::kK, Element_, ElementsPerAccess>::Type; using Base::Base; // Software pipeline stages in epilogue static_assert(Stages_ <= 2, "Sm80 EVT only support upto 2 Stages."); static const int Stages = Stages_; using ThreadShape = cute::Shape< cute::Int<Base::Detail::kAccessWidth>, // lane col idx cute::Int<Base::Detail::kAccessRows>, // lane row idx cute::Int<Base::Detail::kWarpsRemainingForRows>, // warp row idx cute::Int<Base::Shape::kGroup>, // group idx cute::Int<Base::Shape::kCluster> // cluster idx >; using Shape = typename Base::Shape; using Count = typename Base::Count; using ThreadMapShape = cute::Shape< // Column Int<Base::kElementsPerAccess>, // vector Int<Base::Detail::kAccessWidth>, // lane_col_coord Int<Base::Iterations::kColumn>, // iteration::column // Row Int<Base::Detail::kAccessRows>, // lane_row_coord Int<Base::Iterations::kRow>, // iterations in row Int<Base::Detail::kWarpsRemainingForRows>, // warp_row_coord Int<Count::kRow>, // iteration::row Int<Count::kGroup>, // iteration::group Int<Shape::kGroup>, // group_coord Int<Count::kCluster>, // iteration::cluster Int<Shape::kCluster> // cluster_coord >; // The shape of CTA Tile using CtaShapeMNL = cute::Shape< Int< Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup * Shape::kCluster * Count::kCluster >, Int<Shape::kColumn * Count::kColumn>, _1 >; static const int kElementsPerAccess = ElementsPerAccess; // // Methods // CUTLASS_DEVICE static auto tid2coord(int thread_idx) { return cute::idx2crd(thread_idx, ThreadShape{}); } template <class TensorInput> CUTLASS_DEVICE static auto partition(TensorInput &&xT, int thread_idx, gemm::GemmCoord threadblock_tile_offset) { // (BLK_M,BLK_N) Tensor bCxT = local_tile( xT, CtaShapeMNL{}, make_coord(_,_,_), Step<_1,_1, X>{} )(_,_,threadblock_tile_offset.m(),threadblock_tile_offset.n(),threadblock_tile_offset.k()); auto [lane_col_coord, lane_row_coord, warp_row_coord, group_coord, cluster_coord] = tid2coord(thread_idx); // transform to column-major Tensor bCxT_nm = make_tensor( std::forward<decltype(bCxT)>(bCxT).data(), make_layout(get<1>(bCxT.layout()), get<0>(bCxT.layout())) ).compose(make_layout(ThreadMapShape{})); // VECTOR, FRAGMENT_COLUMN, FRAGMENT_ROW, ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER return bCxT_nm(_,lane_col_coord,_,lane_row_coord,_,warp_row_coord,_,_,group_coord,_,cluster_coord); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::epilogue::threadblock /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_2x.hpp/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_2x.hpp", "repo_id": "cutlass", "token_count": 5548 }
30
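The Sm80EVT alias above is the building block for CUTLASS 2.x epilogue visitor trees: its first template argument is the node operation and the remaining arguments are that node's children, which are visited first so their fragments can be forwarded to the node's visit(). The sketch below only illustrates that wiring; the three node types are hypothetical placeholders, not classes defined in this header (real trees use the visitor node implementations provided elsewhere in the library).

#include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp"

struct FetchAccum;       // hypothetical leaf: forwards the accumulator fragment
struct BroadcastAlpha;   // hypothetical leaf: broadcasts a scalar alpha
struct MultiplyNode;     // hypothetical node: multiplies the fragments produced by its children

// Children are evaluated in the order listed, then MultiplyNode receives their fragments
// (together with the raw accumulator fragment) as frg_inputs.
using AlphaTimesAccum = cutlass::epilogue::threadblock::Sm80EVT<
    MultiplyNode,        // NodeOp
    BroadcastAlpha,      // ChildOp 0
    FetchAccum>;         // ChildOp 1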
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_ ///< Element data type > class PredicatedTileIteratorStridedDgrad { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Count::kTile; static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0"); static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0"); static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0"); static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0"); /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; // // Parameters struct // /// Uses a non-template class struct Params : PredicatedTileIteratorParams { /// Convolution problem size cutlass::conv::Conv2dProblemSize problem_size; int tiled_rows_per_filter; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout, cutlass::conv::Conv2dProblemSize problem_size_, int threadblock_row): problem_size(problem_size_), PredicatedTileIteratorParams( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, make_OutputTileThreadMapDesc<ThreadMap>() ) { int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_row); tiled_rows_per_filter = tile_m_per_filter * threadblock_row; } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kColumn; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
Params params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// Starting Dx h and w dimension for strided dgrad mapping int start_h_, start_w_; /// Effective Dy P and Q dimensions for strided dgrad mapping int p_, q_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// A thread's starting column position (assuming steady-state predicates have been computed) Index thread_start_column_; /// Internal state counter int state_[3]; // // Static asserts about internal strides // static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIteratorStridedDgrad( Params const & params, Element *pointer, TensorCoord extent, int thread_idx, FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod, int start_r, int start_s, TensorCoord threadblock_offset = TensorCoord() ): params_(params) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; int r = start_r; int s = start_s; if (params_.problem_size.mode == cutlass::conv::Mode::kConvolution) { r = (params_.problem_size.R - 1 - r); s = (params_.problem_size.S - 1 - s); } // compute starting coordinates in Dx start_h_ and start_w_ strided_dgrad_starting_coords( params_.problem_size, stride_h_divmod, stride_w_divmod, r, s, start_h_, start_w_); p_ = (params_.problem_size.H - start_h_ + params_.problem_size.stride_h - 1) / params_.problem_size.stride_h; q_ = (params_.problem_size.W - start_w_ + params_.problem_size.stride_w - 1) / params_.problem_size.stride_w; extent_row_ = extent.row(); thread_start_row_ = thread_offset.row(); thread_start_column_ = thread_offset.column(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { mask_.predicates[c] = ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column()); } // Null pointer performs no accesses if (!pointer) { mask_.clear(); } // Initialize pointer byte_pointer_ = reinterpret_cast<uint8_t *>(pointer); // Initialize internal state counter state_[0] = state_[1] = state_[2] = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; // remapping rows to find the mapped_row_offset int npq_offset = (row_offset + thread_start_row_) % params_.tiled_rows_per_filter; // (STEP 4.a) [order NHW rows to be 
loaded and stored in output Dx NHWxC layout] int n = npq_offset / (p_ * q_); int residual = npq_offset % (p_ * q_); int p = residual / q_; int q = residual % q_; int mapped_row_offset = n * (params_.problem_size.H * params_.problem_size.W) + (start_h_ + p * params_.problem_size.stride_h) * params_.problem_size.W + (start_w_ + q * params_.problem_size.stride_w); bool row_guard = mapped_row_offset < extent_row_; int64_t row_byte_offset = mapped_row_offset * params_.stride; CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int64_t column_byte_offset = (thread_start_column_ + column * ThreadMap::Delta::kColumn) * (sizeof_bits<Element>::value / 8); bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)(byte_pointer + row_byte_offset + column_byte_offset + byte_offset), guard); } } } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; // remapping rows to find the mapped_row_offset int npq_offset = (row_offset + thread_start_row_) % params_.tiled_rows_per_filter; // (STEP 4.a) [order NHW rows to be loaded and stored in output Dx NHWxC layout] int n = npq_offset / (p_ * q_); int residual = npq_offset % (p_ * q_); int p = residual / q_; int q = residual % q_; int mapped_row_offset = n * (params_.problem_size.H * params_.problem_size.W) + (start_h_ + p * params_.problem_size.stride_h) * params_.problem_size.W + (start_w_ + q * params_.problem_size.stride_w); bool row_guard = mapped_row_offset < extent_row_; int64_t row_byte_offset = mapped_row_offset * params_.stride; CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int64_t column_byte_offset = (thread_start_column_ + column * ThreadMap::Delta::kColumn) * (sizeof_bits<Element>::value / 8); bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_store<AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)(byte_pointer + row_byte_offset + column_byte_offset + byte_offset), guard); } } } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIteratorStridedDgrad &operator++() { ++state_[0]; thread_start_row_ += ThreadMap::Shape::kRow; if (state_[0] == ThreadMap::Count::kRow) { state_[0] = 0; ++state_[1]; thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; if (state_[1] == ThreadMap::Count::kGroup) { state_[1] = 0; ++state_[2]; thread_start_row_ += ThreadMap::Count::kGroup * 
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow; if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; } } } return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Gets the mask CUTLASS_DEVICE void get_mask(Mask &mask) { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h", "repo_id": "cutlass", "token_count": 5884 }
31
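To make the row remapping in load_with_byte_offset and store_with_byte_offset concrete, take a hypothetical Dx tensor with H = W = 8, stride_h = stride_w = 2, and a filter position whose starting coordinates come out as start_h_ = start_w_ = 1. Then p_ = q_ = (8 - 1 + 2 - 1) / 2 = 4, so each image contributes p_ * q_ = 16 mapped rows. A thread whose npq_offset is 5 decodes n = 5 / 16 = 0, p = (5 % 16) / 4 = 1, q = 5 % 4 = 1, and lands on mapped_row_offset = 0 * 64 + (1 + 1*2) * 8 + (1 + 1*2) = 27, i.e. pixel (h, w) = (3, 3) of image 0. Only every stride-th output pixel is touched by this filter position, which is why the iterator cannot use a simple linear row offset.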
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #if !(defined(__clang__) && defined(__CUDA__)) #include "cutlass/cutlass.h" #include "cutlass/wmma_array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/warp/wmma_tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorFragment, ///< wmma fragment to be written (concept: nvcuda::wmma::fragment) typename Layout ///< target shared memory layout > class TileIteratorWmmaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorFragment_ ///< wmma fragment to be written (concept: nvcuda::wmma::fragment) > class TileIteratorWmmaTensorOp<WarpShape_, OperatorShape_, OperatorFragment_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using OperatorFragment = OperatorFragment_; using Layout = layout::RowMajor; // // Derived types // using WmmaDataType = typename OperatorFragment::element_type; using Element = typename cutlass::arch::WmmaToCutlassDataType<WmmaDataType>::Type; ///< Data Type of element stored in nvcuda::wmma::frament using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = WmmaTensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = WmmaFragmentArray<OperatorFragment, Policy::OperatorCount::kColumn * Policy::kWmmaFragmentsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Padding quantity // (Epilogue shared memory padding for WMMA Gemm kernel is set to run optimaly on Turing) using Padding = MatrixShape< 0, 4 * Policy::kElementsPerAccess >; private: /// Storage type for accessing memory //using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to shared memory TensorRef ref_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp(): ref_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp( TensorRef const &ref, unsigned lane_id ): ref_(ref) { } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp & add_pointer_offset(Index pointer_offset) { ref_.add_pointer_offset(pointer_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp & add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset({tile_offset.row() * OperatorShape::kM, tile_offset.column() * WarpShape::kN}); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { for(int n=0; n < Policy::OperatorCount::kColumn; n++) { WmmaDataType* ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({0, n * OperatorShape::kN}) + pointer_offset); nvcuda::wmma::store_matrix_sync( ptr, frag[n], ref_.stride()[0], nvcuda::wmma::layout_t::mem_row_major ); } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { for(int n=0; n < Policy::OperatorCount::kColumn; n++) { WmmaDataType* ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({0, n * OperatorShape::kN}) + pointer_offset); nvcuda::wmma::load_matrix_sync( frag[n], ptr, ref_.stride()[0], nvcuda::wmma::layout_t::mem_row_major ); } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////// #endif // !defined(__clang__)
cutlass/include/cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h/0
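The iterator above is normally driven from inside a threadblock-scoped epilogue. The following is a minimal, hedged sketch of how it can be parameterized and used; the 64x64x16 warp tile, the 16x16x16 single-precision WMMA accumulator fragment, and the helper function name are illustrative assumptions, not part of the header.

// Illustrative sketch only; requires nvcc targeting sm_70 or newer so that
// nvcuda::wmma and CUTLASS's WMMA wrappers are available.
#include <mma.h>
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h"

using WarpShape     = cutlass::gemm::GemmShape<64, 64, 16>;   // warp-level GEMM tile (assumed)
using OperatorShape = cutlass::gemm::GemmShape<16, 16, 16>;   // one WMMA operation (assumed)
using OperatorFragment =
    nvcuda::wmma::fragment<nvcuda::wmma::accumulator, 16, 16, 16, float>;

using TileIterator = cutlass::epilogue::warp::TileIteratorWmmaTensorOp<
    WarpShape, OperatorShape, OperatorFragment, cutlass::layout::RowMajor>;

// Hypothetical helper: writes one iteration's worth of WMMA accumulator
// fragments into the epilogue's shared-memory staging tile.
__device__ void store_accumulators_to_smem(
    TileIterator::TensorRef smem_ref,        // points into the epilogue's shared memory
    TileIterator::Fragment const &frag,      // accumulators for one iteration
    unsigned lane_id) {
  TileIterator iterator(smem_ref, lane_id);
  // store() issues one nvcuda::wmma::store_matrix_sync per column of WMMA
  // operations; callers advance between iterations with add_tile_offset().
  iterator.store(frag);
}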
{ "file_path": "cutlass/include/cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h", "repo_id": "cutlass", "token_count": 2494 }
32
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cute/arch/cluster_sm90.hpp" #include "cute/arch/copy_sm90.hpp" #include "cutlass/gemm/dispatch_policy.hpp" #include "cute/algorithm/functional.hpp" #include "cute/atom/mma_atom.hpp" #include "cute/algorithm/gemm.hpp" #include "cute/tensor_predicate.hpp" #include "cute/numeric/arithmetic_tuple.hpp" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/trace.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { using namespace cute; ///////////////////////////////////////////////////////////////////////////////////////////////// // WarpSpecialized Mainloop template < int Stages, class ClusterShape_, class TileShape_, class KernelSchedule, class ElementA_, class StrideA_, class ElementB_, class StrideB_, class TiledMma_, class GmemTiledCopyA_, class SmemLayoutAtomA_, class SmemCopyAtomA_, class TransformA_, class GmemTiledCopyB_, class SmemLayoutAtomB_, class SmemCopyAtomB_, class TransformB_> struct CollectiveMma< MainloopSm90CpAsyncGmmaWarpSpecialized<Stages,ClusterShape_,KernelSchedule>, TileShape_, ElementA_, StrideA_, ElementB_, StrideB_, TiledMma_, GmemTiledCopyA_, SmemLayoutAtomA_, SmemCopyAtomA_, TransformA_, GmemTiledCopyB_, SmemLayoutAtomB_, SmemCopyAtomB_, TransformB_> { // // Type Aliases // using DispatchPolicy = MainloopSm90CpAsyncGmmaWarpSpecialized<Stages,ClusterShape_,KernelSchedule>; using TileShape = TileShape_; using ClusterShape = ClusterShape_; using ElementA = ElementA_; using StrideA = StrideA_; using ElementB = ElementB_; using StrideB = StrideB_; using TiledMma = TiledMma_; using ElementAccumulator = typename TiledMma::ValTypeC; using GmemTiledCopyA = 
GmemTiledCopyA_; using GmemTiledCopyB = GmemTiledCopyB_; using SmemLayoutAtomA = SmemLayoutAtomA_; using SmemLayoutAtomB = SmemLayoutAtomB_; using SmemCopyAtomA = SmemCopyAtomA_; using SmemCopyAtomB = SmemCopyAtomB_; using TransformA = TransformA_; using TransformB = TransformB_; using ArchTag = typename DispatchPolicy::ArchTag; using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{})); using MainloopPipeline = cutlass::PipelineAsync<DispatchPolicy::Stages>; using PipelineState = typename MainloopPipeline::PipelineState; using PipelineParams = typename MainloopPipeline::Params; static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); using SmemLayoutA = decltype(tile_to_shape( SmemLayoutAtomA{}, make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); using SmemLayoutB = decltype(tile_to_shape( SmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more."); static_assert(cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value && cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value, "MMA atom must source both A and B operand from smem_desc for this mainloop."); struct SharedStorage { struct TensorStorage : cute::aligned_struct<128> { cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>> smem_A; cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>> smem_B; } tensors; using PipelineStorage = typename MainloopPipeline::SharedStorage; PipelineStorage pipeline; }; using TensorStorage = typename SharedStorage::TensorStorage; using PipelineStorage = typename SharedStorage::PipelineStorage; // Host side kernel arguments struct Arguments { ElementA const* ptr_A = nullptr; StrideA dA{}; ElementB const* ptr_B = nullptr; StrideB dB{}; uint32_t mma_promotion_interval = 4; }; // Device side kernel params using Params = Arguments; // // Methods // template <class ProblemShape> static constexpr Params to_underlying_arguments( [[maybe_unused]] ProblemShape const& problem_shape, Arguments const& args, [[maybe_unused]] void* workspace) { return args; } template<class ProblemShape> CUTLASS_HOST_DEVICE static bool can_implement( ProblemShape const& problem_shape, [[maybe_unused]] Arguments const& args) { auto problem_shape_MNKL = append<4>(problem_shape, 1); auto [M,N,K,L] = problem_shape_MNKL; bool implementable = true; implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyA::NumValSrc>(cute::make_shape(M,K,L), StrideA{}); implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyB::NumValSrc>(cute::make_shape(N,K,L), StrideB{}); if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n"); } 
return implementable; } static constexpr int K_PIPE_MAX = DispatchPolicy::Stages; static constexpr int K_PIPE_MMAS = 1; /// Perform a collective-scoped matrix multiply-accumulate /// Producer Perspective template < class TensorA, class TensorB, class KTileIterator, class ResidueMNK > CUTLASS_DEVICE void load( MainloopPipeline pipeline, PipelineState smem_pipe_write, TensorA const& gA_in, TensorB const& gB_in, KTileIterator k_tile_iter, int k_tile_count, ResidueMNK residue_mnk, int thread_idx, TensorStorage& shared_tensors) { using namespace cute; static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident."); static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident."); Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) // Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k) // This aligns the tensor with BLK_K for all but the 0th k_tile Tensor gA = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gA_in); Tensor gB = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gB_in); // Partition the copying of A and B tiles across the threads GmemTiledCopyA gmem_tiled_copy_a; GmemTiledCopyB gmem_tiled_copy_b; auto gmem_thr_copy_a = gmem_tiled_copy_a.get_slice(thread_idx); auto gmem_thr_copy_b = gmem_tiled_copy_b.get_slice(thread_idx); Tensor tAgA = gmem_thr_copy_a.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k) Tensor tAsA = gmem_thr_copy_a.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE) Tensor tBgB = gmem_thr_copy_b.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k) Tensor tBsB = gmem_thr_copy_b.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE) // Allocate predicate tensors for m and n Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{}); Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{}); // Construct identity layout for sA and sB Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k) Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k) // Repeat the partitioning with identity layouts Tensor tAcA = gmem_thr_copy_a.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k) Tensor tBcB = gmem_thr_copy_b.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k) // Set predicates for m bounds CUTLASS_PRAGMA_UNROLL for (int m = 0; m < size<0>(tApA); ++m) { tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m } // Set predicates for n bounds CUTLASS_PRAGMA_UNROLL for (int n = 0; n < size<0>(tBpB); ++n) { tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n } // 0-th stage with predication on k to account for residue { // LOCK smem_pipe_write for _writing_ pipeline.producer_acquire(smem_pipe_write); int write_stage = smem_pipe_write.index(); // Copy gmem to smem for *k_tile_iter, predicating for k residue Tensor tAgAk = tAgA(_,_,_,*k_tile_iter); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < size<2>(tAsA); ++k) { if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted) copy_if(gmem_tiled_copy_a, tApA(_,k), tAgAk(_,_,k), tAsA(_,_,k,write_stage)); } else { clear(tAsA(_,_,k,write_stage)); } } Tensor tBgBk = tBgB(_,_,_,*k_tile_iter); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < size<2>(tBsB); ++k) { if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // 
blk_k coord < residue_k (gB shifted) copy_if(gmem_tiled_copy_b, tBpB(_,k), tBgBk(_,_,k), tBsB(_,_,k,write_stage)); } else { clear(tBsB(_,_,k,write_stage)); } } ++k_tile_iter; --k_tile_count; // UNLOCK smem_pipe_write pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive); // Advance smem_pipe_write ++smem_pipe_write; } // Mainloop CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > 0; --k_tile_count) { // LOCK smem_pipe_write for _writing_ pipeline.producer_acquire(smem_pipe_write); int write_stage = smem_pipe_write.index(); // Copy gmem to smem for *k_tile_iter copy_if(gmem_tiled_copy_a, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage)); copy_if(gmem_tiled_copy_b, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage)); ++k_tile_iter; // UNLOCK smem_pipe_write pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive); // Advance smem_pipe_write ++smem_pipe_write; } } /// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster CUTLASS_DEVICE void load_tail( MainloopPipeline pipeline, PipelineState smem_pipe_write) { // Issue the epilogue waits /* This helps avoid early exit of blocks in Cluster * Waits for all stages to either be released (all * Consumer UNLOCKs), or if the stage was never used * then would just be acquired since the phase was * still inverted from make_producer_start_state */ pipeline.producer_tail(smem_pipe_write); } /// Perform a collective-scoped matrix multiply-accumulate /// Consumer Perspective template < class FrgTensorC > CUTLASS_DEVICE void mma(MainloopPipeline pipeline, PipelineState smem_pipe_read, FrgTensorC& accum, int k_tile_count, int thread_idx, TensorStorage& shared_tensors, Params const& mainloop_params) { using namespace cute; static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident."); static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3."); static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3."); static_assert(cute::is_void_v<SmemCopyAtomA>, "SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions."); static_assert(cute::is_void_v<SmemCopyAtomB>, "SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions."); Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) // // Define C accumulators and A/B partitioning // TiledMma tiled_mma; auto thread_mma = tiled_mma.get_thread_slice(thread_idx); Tensor tCsA = thread_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE) Tensor tCsB = thread_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE) // Allocate "fragments/descriptors" Tensor tCrA = thread_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K,PIPE) Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE) CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(accum)); // M CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE // // PIPELINED MAIN LOOP // static_assert((0 <= K_PIPE_MMAS) && (K_PIPE_MMAS < K_PIPE_MAX), "ERROR : Incorrect number of MMAs in flight"); // We release buffers to producer warps(dma load) with some mmas in flight PipelineState 
smem_pipe_release = smem_pipe_read; // Prologue GMMAs int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count); tiled_mma.accumulate_ = GMMA::ScaleOut::Zero; warpgroup_fence_operand(accum); CUTLASS_PRAGMA_UNROLL for (int k_tile_prologue = prologue_mma_count; k_tile_prologue > 0; --k_tile_prologue) { // WAIT on smem_pipe_read until its data are available (phase bit flips from rdPhaseBit value) auto barrier_token = pipeline.consumer_try_wait(smem_pipe_read); pipeline.consumer_wait(smem_pipe_read, barrier_token); int read_stage = smem_pipe_read.index(); warpgroup_arrive(); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) { // (V,M,K) x (V,N,K) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block,read_stage), tCrB(_,_,k_block,read_stage), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; } warpgroup_commit_batch(); ++smem_pipe_read; } warpgroup_fence_operand(accum); // Mainloop GMMAs k_tile_count -= prologue_mma_count; CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > 0; --k_tile_count) { // WAIT on smem_pipe_read until its data are available (phase bit flips from rdPhaseBit value) auto barrier_token = pipeline.consumer_try_wait(smem_pipe_read); pipeline.consumer_wait(smem_pipe_read, barrier_token); int read_stage = smem_pipe_read.index(); warpgroup_fence_operand(accum); warpgroup_arrive(); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) { // (V,M,K) x (V,N,K) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block,read_stage), tCrB(_,_,k_block,read_stage), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; } warpgroup_commit_batch(); /// Wait on the GMMA barrier for K_PIPE_MMAS (or fewer) outstanding to ensure smem_pipe_write is consumed warpgroup_wait<K_PIPE_MMAS>(); warpgroup_fence_operand(accum); // UNLOCK smem_pipe_release, done _computing_ on it pipeline.consumer_release(smem_pipe_release); // Advance smem_pipe_read and smem_pipe_release ++smem_pipe_read; ++smem_pipe_release; } warpgroup_fence_operand(accum); } /// Perform a Consumer Epilogue to release all buffers CUTLASS_DEVICE void mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) { // Prologue GMMAs int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count); k_tile_count -= prologue_mma_count; smem_pipe_release.advance(k_tile_count); // Wait on all GMMAs to complete warpgroup_wait<0>(); for (int count = 0; count < prologue_mma_count; ++count) { pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it ++smem_pipe_release; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::collective /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/collective/sm90_mma_multistage_gmma_ss_warpspecialized.hpp/0
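In practice this cp.async + GMMA warp-specialized mainloop is rarely spelled out by hand; it is usually selected through the SM90 collective builder by naming the KernelCpAsyncWarpSpecialized schedule. The sketch below is a hedged illustration of that path; the half-precision element types, alignments, tile shape, and cluster shape are assumptions chosen only for the example.

#include "cutlass/arch/arch.h"
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"

// Assumed configuration: fp16 A (row-major) x fp16 B (column-major), fp32 accumulation.
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
    cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
    cutlass::half_t, cutlass::layout::RowMajor,    8,    // A, 128-bit aligned accesses
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,    // B, 128-bit aligned accesses
    float,                                               // accumulator
    cute::Shape<cute::_128, cute::_128, cute::_64>,      // TileShape (M, N, K)
    cute::Shape<cute::_1, cute::_1, cute::_1>,           // ClusterShape
    cutlass::gemm::collective::StageCountAuto,           // let the builder size the pipeline
    cutlass::gemm::KernelCpAsyncWarpSpecialized          // routes to this specialization
  >::CollectiveOp;

If the builder resolves as expected, CollectiveOp is the CollectiveMma specialization defined in this header, with SmemLayoutA/B and the pipeline stage count derived from the chosen tile shape and the available shared memory.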
{ "file_path": "cutlass/include/cutlass/gemm/collective/sm90_mma_multistage_gmma_ss_warpspecialized.hpp", "repo_id": "cutlass", "token_count": 8398 }
33
/*************************************************************************************************** * Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a sparse GEMM kernel that computes the absolute maximum of the output tensor and applies additional scaling factors to operands. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/sparse_gemm.h" #include "cutlass/gemm/kernel/default_gemm_sparse_with_absmax.h" #include "cutlass/gemm/device/default_gemm_configuration.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassSimt, /// Tag indicating architecture to tune for typename ArchTag_ = arch::Sm70, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = typename threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// If true, kernel supports split-K with serial reduction bool SplitKSerial = false, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator> class SparseGemmWithAbsmax { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = LayoutC_; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; 
using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; using MathOperator = Operator; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static int const kAlignmentC = EpilogueOutputOp::kCount; static bool const kSplitKSerial = SplitKSerial; static ComplexTransform const kTransformA = ComplexTransform::kNone; static ComplexTransform const kTransformB = ComplexTransform::kNone; /// Define the kernel using GemmKernel = typename kernel::DefaultSparseGemmWithAbsmax< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, kSplitKSerial, Operator >::GemmKernel; using ElementE = typename GemmKernel::ElementE; using LayoutE = typename GemmKernel::LayoutE; static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value; static int const kSparse = GemmKernel::kSparse; static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits; static int const kElementsPerElementE = GemmKernel::kElementsPerElementE; using Arguments = typename GemmKernel::Arguments; private: /// Kernel parameters object typename GemmKernel::Params params_; public: /// Constructs the GEMM. SparseGemmWithAbsmax() { } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { if (!kSplitKSerial && args.split_k_slices > 1) { return Status::kErrorInvalidProblem; } Status status = GemmKernel::can_implement( args.problem_size, args.ref_A.non_const_ref(), args.ref_B.non_const_ref(), args.ref_C.non_const_ref(), args.ref_D, args.ref_E.non_const_ref() ); if (status != Status::kSuccess) { return status; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.split_k_slices); if (kSplitKSerial && args.split_k_slices > 1) { bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n()); } return bytes; } /// Initializes GEMM state from arguments. 
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.split_k_slices); if (kSplitKSerial) { if (args.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } size_t bytes = get_workspace_size(args); cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream); if (result != cudaSuccess) { return Status::kErrorInternal; } } } else { if (args.split_k_slices > 1) { return Status::kErrorInvalidProblem; } } // Initialize the Params structure params_ = typename GemmKernel::Params{ args.problem_size, grid_shape, args.ref_A.non_const_ref(), args.ref_B.non_const_ref(), args.ref_C.non_const_ref(), args.ref_D, args.ref_E.non_const_ref(), args.ref_Aux, args.ptr_Vector, args.ldr, args.epilogue, static_cast<int *>(workspace) }; int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { if (kSplitKSerial && args.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } } params_.ref_A.reset(args.ref_A.non_const_ref().data()); params_.ref_B.reset(args.ref_B.non_const_ref().data()); params_.ref_C.reset(args.ref_C.non_const_ref().data()); params_.ref_D.reset(args.ref_D.data()); params_.ref_E.reset(args.ref_E.non_const_ref().data()); params_.output_op = args.epilogue; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(GemmKernel::kThreadCount, 1, 1); int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/device/gemm_sparse_with_absmax.h/0
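The device-level wrapper above follows the usual CUTLASS 2.x call sequence: can_implement, initialize, run. The sketch below captures that flow without committing to a particular instantiation; SparseAbsmaxGemm and run_sparse_gemm_with_absmax are placeholder names, and the caller is assumed to have built the Arguments structure and a workspace of at least get_workspace_size(args) bytes.

#include <cuda_runtime.h>
#include "cutlass/cutlass.h"

// SparseAbsmaxGemm stands for a fully specified
// cutlass::gemm::device::SparseGemmWithAbsmax<...> instantiation.
template <typename SparseAbsmaxGemm>
cutlass::Status run_sparse_gemm_with_absmax(
    typename SparseAbsmaxGemm::Arguments const &args,
    void *workspace,                       // >= SparseAbsmaxGemm::get_workspace_size(args) bytes
    cudaStream_t stream = nullptr) {

  SparseAbsmaxGemm gemm_op;

  // Reject unsupported problem sizes or split-K settings up front.
  cutlass::Status status = SparseAbsmaxGemm::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // Builds the kernel Params, clears the split-K semaphore workspace when
  // serial split-K is enabled, and opts into large dynamic shared memory.
  status = gemm_op.initialize(args, workspace, stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  return gemm_op.run(stream);   // launches the underlying sparse GEMM kernel
}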
{ "file_path": "cutlass/include/cutlass/gemm/device/gemm_sparse_with_absmax.h", "repo_id": "cutlass", "token_count": 4371 }
34
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/arch/arch.h" #include "cutlass/gemm/gemm.h" #include "cute/layout.hpp" #include "cute/numeric/integral_constant.hpp" ////////////////////////////////////////////////////////////////////////////// namespace cutlass::detail { template <class T, template <int...> class U> struct is_kernel_tag_of : cute::false_type {}; template <template <int...> class U, int... 
Args> struct is_kernel_tag_of<U<Args...>, U> : cute::true_type {}; template <class T, template <int...> class U> constexpr bool is_kernel_tag_of_v = is_kernel_tag_of<T, U>::value; } ////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm { using namespace cute; ////////////////////////////////////////////////////////////////////////////// namespace detail { enum class KernelInputTransformType { FastF32, InterleavedComplexTF32 }; } // namespace detail ////////////////////////////////////////////////////////////////////////////// // // Kernel schedule policies (the base class tags, one for each kernel layer file) // struct KernelMultistage { }; struct KernelCpAsyncWarpSpecialized { }; struct KernelCpAsyncWarpSpecializedPingpong { }; struct KernelCpAsyncWarpSpecializedCooperative { }; struct KernelTma { }; struct KernelTmaWarpSpecialized { }; struct KernelTmaWarpSpecializedPingpong { }; struct KernelTmaWarpSpecializedCooperative { }; struct KernelPtrArrayTmaWarpSpecializedCooperative { }; ////////////////////////////////////////////////////////////////////////////// // // Builder dispatch policies (not a part of the main CUTLASS layers, simply used to opt into // specific collective builder dispatches) // // FP8 related policies (including Fast Accumulation) struct KernelTmaWarpSpecializedFP8FastAccum : KernelTmaWarpSpecialized { }; struct KernelTmaWarpSpecializedPingpongFP8FastAccum : KernelTmaWarpSpecializedPingpong { }; struct KernelTmaWarpSpecializedCooperativeFP8FastAccum: KernelTmaWarpSpecializedCooperative { }; struct KernelPtrArrayTmaWarpSpecializedCooperativeFP8FastAccum : KernelPtrArrayTmaWarpSpecializedCooperative { }; // Policies to opt into mixed type GEMMs struct KernelTmaWarpSpecializedMixedInput : KernelTmaWarpSpecialized { }; struct KernelTmaWarpSpecializedPingpongMixedInput : KernelTmaWarpSpecializedPingpong { }; struct KernelTmaWarpSpecializedCooperativeMixedInput: KernelTmaWarpSpecializedCooperative { }; ////////////////////////////////////////////////////////////////////////////// // Policies for dispatch of epilogue struct EpilogueDefault { }; struct EpilogueTransposed { }; ////////////////////////////////////////////////////////////////////////////// // // Collective Mainloop Policies // // 2 stage pipeline through 1 stage in smem, 1 in rmem, WITHOUT predicated gmem loads struct MainloopSm70TwoStageUnpredicated { constexpr static int Stages = 2; using ArchTag = arch::Sm70; using Schedule = KernelMultistage; using ClusterShape = Shape<_1,_1,_1>; }; // 2 stage pipeline through 1 stage in smem, 1 in rmem, with predicated gmem loads struct MainloopSm70TwoStage { constexpr static int Stages = 2; using ArchTag = arch::Sm70; using Schedule = KernelMultistage; using ClusterShape = Shape<_1,_1,_1>; }; // n-buffer in smem (cp.async), pipelined with registers, WITHOUT predicated gmem loads template<int Stages_> struct MainloopSm80CpAsyncUnpredicated { constexpr static int Stages = Stages_; using ArchTag = arch::Sm80; using Schedule = KernelMultistage; using ClusterShape = Shape<_1,_1,_1>; }; // n-buffer in smem (cp.async), pipelined with registers, with predicated gmem loads template<int Stages_> struct MainloopSm80CpAsync { constexpr static int Stages = Stages_; using ArchTag = arch::Sm80; using Schedule = KernelMultistage; using ClusterShape = Shape<_1,_1,_1>; }; // n-buffer in smem (cp.async), pipelined with Hopper GMMA, with predicated gmem loads, warp specialized dynamic schedule template< int Stages_, class ClusterShape_ = 
Shape<_1,_1,_1>, class KernelSchedule = KernelCpAsyncWarpSpecialized > struct MainloopSm90CpAsyncGmmaWarpSpecialized { constexpr static int Stages = Stages_; using ClusterShape = ClusterShape_; using ArchTag = arch::Sm90; using Schedule = KernelSchedule; }; // n-buffer in smem (cp.async), pipelined with Hopper GMMA, with predicated gmem loads, warp specialized dynamic schedule template< int Stages_, class ClusterShape_ = Shape<_1,_1,_1>, class KernelSchedule = KernelCpAsyncWarpSpecialized > struct MainloopSm90CpAsyncGmmaRmemAWarpSpecialized { constexpr static int Stages = Stages_; using ClusterShape = ClusterShape_; using ArchTag = arch::Sm90; using Schedule = KernelSchedule; }; // n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, static schedule between TMA and GMMA template< int Stages_, class ClusterShape_ = Shape<_1,_1,_1>, int PipelineAsyncMmaStages_ = 1 > struct MainloopSm90TmaGmma { constexpr static int Stages = Stages_; using ClusterShape = ClusterShape_; constexpr static int PipelineAsyncMmaStages = PipelineAsyncMmaStages_; using ArchTag = arch::Sm90; using Schedule = KernelTma; }; // n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule template< int Stages_, class ClusterShape_ = Shape<_1,_1,_1>, class KernelSchedule = KernelTmaWarpSpecializedCooperative > struct MainloopSm90TmaGmmaWarpSpecialized { constexpr static int Stages = Stages_; using ClusterShape = ClusterShape_; using ArchTag = arch::Sm90; using Schedule = KernelSchedule; }; // n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule // With GMMA's A data from registers. template< int Stages_, class ClusterShape_ = Shape<_1,_1,_1>, class KernelSchedule = KernelTmaWarpSpecialized > struct MainloopSm90TmaGmmaRmemAWarpSpecialized { constexpr static int Stages = Stages_; using ClusterShape = ClusterShape_; using ArchTag = arch::Sm90; using Schedule = KernelSchedule; static_assert( cute::is_same_v<Schedule, KernelTmaWarpSpecialized> || cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpong> || cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperative>, "KernelSchedule must be one of the warp specialized policies"); }; template< int Stages_, class ClusterShape_ = Shape<_1,_1,_1>, class KernelSchedule = KernelTmaWarpSpecialized > struct MainloopSm90TmaGmmaRmemAWarpSpecializedMixedInput { constexpr static int Stages = Stages_; using ClusterShape = ClusterShape_; using ArchTag = arch::Sm90; using Schedule = KernelSchedule; static_assert( cute::is_same_v<Schedule, KernelTmaWarpSpecialized> || cute::is_same_v<Schedule, KernelTmaWarpSpecializedMixedInput> || cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpong> || cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpongMixedInput> || cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperative> || cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperativeMixedInput>, "KernelSchedule must be one of the warp specialized policies"); }; // n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule // For FP8 kernels template< int Stages_, class ClusterShape_ = Shape<_1,_1,_1>, class KernelSchedule = KernelTmaWarpSpecialized > struct MainloopSm90TmaGmmaWarpSpecializedFP8 : MainloopSm90TmaGmmaWarpSpecialized<Stages_, ClusterShape_, KernelSchedule> { static_assert( cute::is_same_v<KernelSchedule, KernelTmaWarpSpecialized> || cute::is_same_v<KernelSchedule, KernelTmaWarpSpecializedPingpong> || 
cute::is_same_v<KernelSchedule, KernelTmaWarpSpecializedCooperative>, "KernelSchedule must be one of the warp specialized policies"); }; // n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule for Ptr-Array and Grouped Gemm template< int Stages_, class ClusterShape_ = Shape<_1,_1,_1>, class KernelSchedule = KernelPtrArrayTmaWarpSpecializedCooperative > struct MainloopSm90ArrayTmaGmmaWarpSpecialized { constexpr static int Stages = Stages_; using ClusterShape = ClusterShape_; using ArchTag = arch::Sm90; using Schedule = KernelSchedule; static_assert( cute::is_base_of_v<KernelPtrArrayTmaWarpSpecializedCooperative, KernelSchedule>, "KernelSchedule must be one of the Ptr-Array or Grouped Gemm TMA Warp Specialized Cooperative policies"); }; ////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm
cutlass/include/cutlass/gemm/dispatch_policy.hpp/0
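A dispatch policy is a pure compile-time description: it bundles the stage count, cluster shape, architecture tag, and kernel schedule that the collective and kernel layers branch on. The short sketch below illustrates that contract with static_asserts; the particular choice of four stages, a 2x1x1 cluster, and the cooperative TMA warp-specialized schedule is only an example.

#include "cutlass/gemm/dispatch_policy.hpp"

using ExamplePolicy = cutlass::gemm::MainloopSm90TmaGmmaWarpSpecialized<
    /*Stages=*/4,
    /*ClusterShape=*/cute::Shape<cute::_2, cute::_1, cute::_1>,
    /*KernelSchedule=*/cutlass::gemm::KernelTmaWarpSpecializedCooperative>;

// The policy only carries types and constants; downstream layers read them back.
static_assert(ExamplePolicy::Stages == 4,
              "stage count is a member of the mainloop policy");
static_assert(cute::is_same_v<ExamplePolicy::ArchTag, cutlass::arch::Sm90>,
              "every SM90 mainloop policy reports arch::Sm90");
static_assert(cute::is_same_v<ExamplePolicy::Schedule,
                              cutlass::gemm::KernelTmaWarpSpecializedCooperative>,
              "kernel layers dispatch on the schedule tag nested in the policy");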
{ "file_path": "cutlass/include/cutlass/gemm/dispatch_policy.hpp", "repo_id": "cutlass", "token_count": 3140 }
35
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are accommodated by exchanging A and B operands and assuming transposed layouts. 
*/ #pragma once #include "cutlass/blas3.h" #include "cutlass/complex.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/kernel/symm_universal.h" #include "cutlass/gemm/kernel/default_symm.h" #include "cutlass/gemm/kernel/default_symm_complex.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Side Mode for A (kLeft or kRight) SideMode SideModeA, /// Fill Mode for A (kLower or kUpper) FillMode FillModeA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by SYRK typename Operator, /// Blas3 computation mode (symmetric/hermitian) BlasMode BlasMode_ = BlasMode::kSymmetric, /// typename Enable = void > struct DefaultSymmUniversal; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Real-valued SYMM/HEMM update kernels // template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Side Mode for A (kLeft or kRight) SideMode SideModeA, /// Fill Mode for A (kLower or kUpper) FillMode FillModeA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by SYMM/HEMM typename Operator> struct 
DefaultSymmUniversal< ElementA, LayoutA, SideModeA, FillModeA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, BlasMode::kSymmetric, typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type > { using DefaultSymmkernel = typename kernel::DefaultSymm< ElementA, LayoutA, SideModeA, FillModeA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, BlasMode::kSymmetric >::SymmKernel; /// Define the kernel in terms of the default kernel using SymmKernel = kernel::SymmUniversal< typename DefaultSymmkernel::Mma1, typename DefaultSymmkernel::Mma2, typename DefaultSymmkernel::Epilogue, ThreadblockSwizzle, SideModeA, FillModeA >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Complex-valued SYMM/HEMM update kernels // template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Side Mode for A (kLeft or kRight) SideMode SideModeA, /// Fill Mode for A (kLower or kUpper) FillMode FillModeA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by SYRK typename Operator, // BlasMode BlasMode kBlasMode > struct DefaultSymmUniversal< ElementA, LayoutA, SideModeA, FillModeA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, kBlasMode, typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type > { using DefaultSymmkernel = typename kernel::DefaultSymmComplex< ElementA, LayoutA, SideModeA, FillModeA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, Operator, SplitKSerial, kBlasMode >::SymmKernel; /// Define the kernel in terms of the default kernel using SymmKernel = kernel::SymmUniversal< typename DefaultSymmkernel::Mma1, typename DefaultSymmkernel::Mma2, typename DefaultSymmkernel::Epilogue, ThreadblockSwizzle, SideModeA, FillModeA >; }; } // namespace kernel } // 
namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/default_symm_universal.h/0
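DefaultSymmUniversal is a trait that stitches the two threadblock-scoped MMAs and the epilogue selected by DefaultSymm/DefaultSymmComplex into a SymmUniversal kernel; device-level SYMM/HEMM layers consume its nested SymmKernel type. Below is a hedged instantiation sketch; the double-precision element types, tile shapes, stage count, and epilogue are assumptions patterned after common SM80 FP64 tensor-op configurations, not the only valid choice.

#include "cutlass/arch/arch.h"
#include "cutlass/arch/mma.h"
#include "cutlass/blas3.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/default_symm_universal.h"

// Assumed configuration: double-precision SYMM, A on the left, lower fill,
// SM80 FP64 tensor cores (8x8x4 MMA), 4 pipeline stages, no serial split-K.
using SymmKernel = typename cutlass::gemm::kernel::DefaultSymmUniversal<
    double, cutlass::layout::ColumnMajor,                      // A
    cutlass::SideMode::kLeft, cutlass::FillMode::kLower,
    1,                                                         // alignment A
    double, cutlass::layout::ColumnMajor, 1,                   // B, alignment B
    double, cutlass::layout::ColumnMajor,                      // C / D
    double,                                                    // accumulator
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<64, 64, 16>,                      // threadblock tile
    cutlass::gemm::GemmShape<32, 32, 16>,                      // warp tile
    cutlass::gemm::GemmShape<8, 8, 4>,                         // instruction shape
    cutlass::epilogue::thread::LinearCombination<double, 1, double, double>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    4,                                                         // stages
    false,                                                     // SplitKSerial
    cutlass::arch::OpMultiplyAdd,
    cutlass::BlasMode::kSymmetric>::SymmKernel;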
{ "file_path": "cutlass/include/cutlass/gemm/kernel/default_symm_universal.h", "repo_id": "cutlass", "token_count": 3292 }
36
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for GEMM performing a reduction over K partitions in parallel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > struct GemmSplitKParallel { using Mma = Mma_; using Epilogue = Epilogue_; using OutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; static int const kAlignmentK = Mma::Operator::Shape::kK; /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; int swizzle_log_tile; typename Mma::IteratorA::Params params_A; typename Mma::IteratorA::TensorRef ref_A; typename Mma::IteratorB::Params params_B; typename Mma::IteratorB::TensorRef ref_B; typename Epilogue::OutputTileIterator::Params params_D; typename Epilogue::OutputTileIterator::TensorRef ref_D; typename OutputOp::Params output_op; int64_t splitk_slice_stride; int gemm_k_size; // // Methods // CUTLASS_HOST_DEVICE Params(): swizzle_log_tile(0) { } CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmCoord const & grid_tiled_shape, typename Mma::IteratorA::TensorRef ref_A, typename Mma::IteratorB::TensorRef ref_B, typename Epilogue::OutputTileIterator::TensorRef ref_D, typename OutputOp::Params output_op, int64_t splitk_slice_stride ): problem_size(problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(ref_A.layout()), ref_A(ref_A), params_B(ref_B.layout()), ref_B(ref_B), params_D(ref_D.layout()), ref_D(ref_D), output_op(output_op), splitk_slice_stride(splitk_slice_stride) { int full_gemm_k_iterations = problem_size.k() / Mma::Shape::kK; int gemm_k_iterations = full_gemm_k_iterations / grid_tiled_shape.k(); gemm_k_size = gemm_k_iterations * Mma::Shape::kK; } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE GemmSplitKParallel() { } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.k() * params.gemm_k_size, }; cutlass::MatrixCoord tb_offset_B{ threadblock_tile_offset.k() * params.gemm_k_size, threadblock_tile_offset.n() * Mma::Shape::kN }; // Problem size is a function of threadblock index in the K dimension int problem_size_k; if (threadblock_tile_offset.k() + 1 == params.grid_tiled_shape.k()) { problem_size_k = params.problem_size.k(); } else { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, params.ref_A.data(), {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, 
params.ref_B.data(), {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); int warp_idx = threadIdx.x / 32; int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // OutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); // Tile iterator writing to output tile typename Epilogue::OutputTileIterator iterator_D( params.params_D, params.ref_D.data(), params.problem_size.mn(), thread_idx, threadblock_offset ); iterator_D.add_pointer_offset(params.splitk_slice_stride * threadblock_tile_offset.k()); // Execute the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Run efficient epilogue epilogue(output_op, iterator_D, accumulators, iterator_D); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/kernel/gemm_splitk_parallel.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/gemm_splitk_parallel.h", "repo_id": "cutlass", "token_count": 2945 }
37
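A minimal host-side sketch (not taken from the library itself) of the split-K partitioning arithmetic used by GemmSplitKParallel above: it mirrors how the Params constructor computes gemm_k_size and how operator() derives each slice's K range and mainloop iteration count. kThreadblockK stands in for Mma::Shape::kK, and the problem/grid sizes are illustrative values only.

/////////////////////////////////////////////////////////////////////////////////////////////////
// Sketch: per-slice K partitioning for parallel split-K (illustrative, not part of the library)
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <cstdio>

int main() {
  int const kThreadblockK = 32;   // stand-in for Mma::Shape::kK
  int const problem_k     = 1000; // GEMM K extent
  int const grid_k        = 4;    // number of split-K slices (grid_tiled_shape.k())

  // Mirrors the Params constructor: distribute full K iterations evenly across
  // slices, rounded down to a whole number of threadblock K tiles.
  int full_gemm_k_iterations = problem_k / kThreadblockK;
  int gemm_k_iterations      = full_gemm_k_iterations / grid_k;
  int gemm_k_size            = gemm_k_iterations * kThreadblockK;

  for (int slice = 0; slice < grid_k; ++slice) {
    int k_begin = slice * gemm_k_size;
    // As in operator(), the last slice absorbs the K remainder.
    int k_end   = (slice + 1 == grid_k) ? problem_k : (slice + 1) * gemm_k_size;
    int iters   = (k_end - k_begin + kThreadblockK - 1) / kThreadblockK;
    std::printf("slice %d: K [%4d, %4d) -> %2d mainloop iterations\n",
                slice, k_begin, k_end, iters);
  }
  return 0;
}

Each slice writes its partial accumulators to a distinct region of the output workspace (iterator_D is offset by splitk_slice_stride times the slice index), and the partial results are reduced in a separate pass afterwards.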
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Grouped Rank2K kernel. */ #pragma once #include "cutlass/blas3.h" #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/layout/matrix.h" #include "cutlass/trace.h" #include "cutlass/gemm/kernel/rank_2k_transpose_operands.h" #include "cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T) typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T) typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function ComplexTransform OriginalTransformA_, ///! Public-facing transformation on A ComplexTransform OriginalTransformB_, ///! Public-facing transformation on B FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper) BlasMode BlasMode_, ///! Blas3 computation mode GroupScheduleMode GroupScheduleMode_, ///! 
Type of scheduling to perform bool Transposed = false > struct Rank2KGrouped { public: using Mma1 = Mma1_; using Mma2 = Mma2_; static_assert(platform::is_same<typename Mma1::LayoutC, cutlass::layout::RowMajor>::value && platform::is_same<typename Mma2::LayoutC, cutlass::layout::RowMajor>::value, "Kernel-level grouped Rank2K requires that LayoutC be row major."); // Define generic Mma for usecases that use Kernel::Mma using Mma = Mma1_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_; static bool const kTransposed = Transposed; // Public-facing type definitions related to operand element type, layout, and complex conjugate // operation. Must interact with the 'kTransposed' notion to reflect the original layout, // fill mode, etc. passed in. // // Recall that a Rank2K operation performs (A x BT) + (B x AT) // This is performed via: // Mma1 = (A x BT) // Mma2 = (B x AT) // // However, if C needs to be transposed, then this is changed to the following: // Mma1 = (B x AT) // Mma2 = (A x BT) // // The transformation above is achieved by swapping the Layouts/Elements/Transforms/etc. // of A and B as they are passed into the instantiations of Mma1 and Mma2. // // Now, given access to only Mma1 and Mma2, as well as whether a transposition has occurred, // we wish to retrieve the original Layouts/Elements/etc. for A and B that were passed into // the device-level call. // // The logic to do this (which is made clearer by referencing the above instantiations) is as follows: // LayoutA = kTransposed ? Mma2::LayoutA : Mma1::LayoutA // LayoutB = kTransposed ? Mma1::LayoutA : Mma2::LayoutA // // We achieve this swapping by passing Mma1::*A and Mma2::*B to Rank2KMapArguments: using MapArgumentsA = kernel::detail::Rank2KMapArguments< typename Mma1::IteratorA::Element, typename Mma1::IteratorA::Layout, Mma1::kTransformA, Mma1::IteratorA::AccessType::kElements, typename Mma2::IteratorA::Element, typename Mma2::IteratorA::Layout, Mma2::kTransformA, Mma2::IteratorA::AccessType::kElements, typename Mma1::LayoutC, FillModeC_, kTransposed >; using ElementA = typename MapArgumentsA::ElementA; using LayoutA = typename MapArgumentsA::LayoutA; static int const kAlignmentA = MapArgumentsA::kAlignmentA; using MapArgumentsB = kernel::detail::Rank2KMapArguments< typename Mma2::IteratorA::Element, typename Mma2::IteratorA::Layout, Mma2::kTransformA, Mma2::IteratorA::AccessType::kElements, typename Mma1::IteratorA::Element, typename Mma1::IteratorA::Layout, Mma1::kTransformA, Mma1::IteratorA::AccessType::kElements, typename Mma2::LayoutC, FillModeC_, kTransposed >; using ElementB = typename MapArgumentsB::ElementA; using LayoutB = typename MapArgumentsB::LayoutA; static int const kAlignmentB = MapArgumentsB::kAlignmentA; // Use the user-provided TransformA and TransformB, rather than those // resulting from MapArguments, because Mma1 and Mma2 may have different // complex transforms than those passed in by the user. 
// (See kernel/rank_2k_complex.h for an example of this) static cutlass::ComplexTransform const kTransformA = OriginalTransformA_; static cutlass::ComplexTransform const kTransformB = OriginalTransformB_; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename MapArgumentsA::LayoutC; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; static FillMode const kFillModeC = MapArgumentsA::kFillModeC; // Common type definitions for Mma1 and Mma2 using Operator = typename Mma1::Operator; using OperatorClass = typename Mma1::Operator::OperatorClass; using ThreadblockShape = typename Mma1::Shape; using WarpShape = typename Mma1::Operator::Shape; using InstructionShape = typename Mma1::Policy::Operator::InstructionShape; using ArchTag = typename Mma1::ArchTag; static int const kStages = Mma1::kStages; static BlasMode const kBlasMode = BlasMode_; private: static FillMode const kInternalFillModeC = FillModeC_; public: /// Warp count (concept: GemmShape) using WarpCount = typename Mma1::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using ProblemVisitor = Rank2KGroupedProblemVisitor< ThreadblockShape, kGroupScheduleMode, kThreadCount, kThreadCount, kInternalFillModeC>; // // Structures // /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode = GemmUniversalMode::kGemm; GemmCoord *problem_sizes = nullptr; int problem_count{0}; int threadblock_count{0}; typename EpilogueOutputOp::Params epilogue; ElementA ** ptr_A = nullptr; ElementB ** ptr_B = nullptr; ElementC ** ptr_C = nullptr; ElementC ** ptr_D = nullptr; typename LayoutA::Stride::LongIndex *lda = nullptr; typename LayoutB::Stride::LongIndex *ldb = nullptr; typename LayoutC::Stride::LongIndex *ldc = nullptr; typename LayoutC::Stride::LongIndex *ldd = nullptr; // Only used by device-level operator GemmCoord *host_problem_sizes = nullptr; bool allow_early_exit = false; // // Methods // /// Default ctor Arguments() = default; /// Ctor CUTLASS_HOST_DEVICE Arguments( GemmUniversalMode mode, GemmCoord *problem_sizes, int problem_count, int threadblock_count, typename EpilogueOutputOp::Params epilogue, ElementA ** ptr_A, ElementB ** ptr_B, ElementC ** ptr_C, ElementC ** ptr_D, typename LayoutA::Stride::LongIndex *lda, typename LayoutB::Stride::LongIndex *ldb, typename LayoutC::Stride::LongIndex *ldc, typename LayoutC::Stride::LongIndex *ldd, GemmCoord *host_problem_sizes=nullptr, bool allow_early_exit=false ): mode(mode), problem_sizes(problem_sizes), problem_count(problem_count), threadblock_count(threadblock_count), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), host_problem_sizes(host_problem_sizes), allow_early_exit(allow_early_exit) { } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params { typename ProblemVisitor::Params problem_visitor{}; int threadblock_count = 0; typename EpilogueOutputOp::Params output_op{}; GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; int batch_count = 0; ElementA** ptr_A = nullptr; ElementB** ptr_B = nullptr; ElementC** ptr_C = nullptr; ElementC** ptr_D = nullptr; typename LayoutA::Stride::LongIndex* lda = nullptr; typename LayoutB::Stride::LongIndex* ldb = nullptr; typename LayoutC::Stride::LongIndex* ldc = nullptr; typename LayoutC::Stride::LongIndex* ldd = nullptr; bool allow_early_exit = false; // // Methods // Params() = default; CUTLASS_HOST_DEVICE 
Params(Arguments const &args, void *workspace = nullptr, int tile_count = 0): problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count), threadblock_count(args.threadblock_count), output_op(args.epilogue), ptr_A(args.ptr_A), ptr_B(args.ptr_B), ptr_C(args.ptr_C), ptr_D(args.ptr_D), lda(args.lda), ldb(args.ldb), ldc(args.ldc), ldd(args.ldd), allow_early_exit(args.allow_early_exit) { } CUTLASS_HOST_DEVICE void update( Arguments const &args, void *workspace = nullptr, int tile_count = 0) { problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, workspace, tile_count); threadblock_count = args.threadblock_count; output_op = args.output_op; ptr_A = args.ptr_A; ptr_B = args.ptr_B; ptr_C = args.ptr_C; ptr_D = args.ptr_D; } }; /// Shared memory storage structure struct SharedStorage { union { typename Mma1::SharedStorage mma1_main_loop; typename Mma2::SharedStorage mma2_main_loop; typename Epilogue::SharedStorage epilogue; } kernel; // ProblemVisitor shared storage can't be overlapped with others typename ProblemVisitor::SharedStorage problem_visitor; }; public: // // Methods // Rank2KGrouped() = default; /// Determines whether kernel satisfies alignment static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) { return Status::kSuccess; } static Status can_implement(Arguments const &args) { return Status::kSuccess; } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Early exit following LAPACK's definition if (params.allow_early_exit && (params.output_op.alpha == ElementC(0)) && (params.output_op.beta == ElementC(1))) { return; } // // Problem visitor. // ProblemVisitor problem_visitor( params.problem_visitor, shared_storage.problem_visitor, blockIdx.x); // Outer 'persistent' loop to iterate over tiles while (problem_visitor.next_tile()) { GemmCoord problem_size = problem_visitor.problem_size(); int32_t problem_idx = problem_visitor.problem_index(); int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); cutlass::gemm::GemmCoord threadblock_tile_offset = problem_visitor.threadblock_offset(threadblock_idx); // // Perform checks to determine whether the results of this threadblock will be needed. // An example of an unneeded threadblock is one that is assigned to compute in the upper // portion of a Rank2K kernel filled with mode kLower. // // TODO: Consider pushing these checks into ProblemVisitor to avoid spuriously // returning from `next_tile()`. 
// // Early exit if threadblock is out of range if (grid_shape.m() <= threadblock_tile_offset.m() || grid_shape.n() <= threadblock_tile_offset.n()) { // Next tile problem_visitor.advance(gridDim.x); continue; } // Skip this tile if Fill Mode is Lower and // if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal) if (kInternalFillModeC == cutlass::FillMode::kLower && (threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) { // Next tile problem_visitor.advance(gridDim.x); continue; } // Skip this tile if Fill Mode is Upper and // if the entire tile is below the main diagonal (top-right corner is at or below the diagonal) if (kInternalFillModeC == cutlass::FillMode::kUpper && threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { // Next tile problem_visitor.advance(gridDim.x); continue; } bool tile_on_diagonal = false; // Mark tiles that are being crossed by the main diagonal // (top-right and bottom-left corners are on either side of the diagonal) if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN && threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { tile_on_diagonal = true; } int offset_k = 0; int problem_size_k = problem_size.k(); // // Fetch pointers based on mode. // if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < grid_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * problem_size.k(); } offset_k = threadblock_tile_offset.k() * problem_size.k(); } ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx])); typename LayoutA::Stride::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]); ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx])); typename LayoutB::Stride::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_MxK{ threadblock_tile_offset.m() * Mma1::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_KxN{ offset_k, threadblock_tile_offset.n() * Mma1::Shape::kN }; // Assume identity swizzle MatrixCoord tb_offset( threadblock_tile_offset.m() * Mma1::Shape::kM, threadblock_tile_offset.n() * Mma1::Shape::kN ); // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands for Mma1 typename Mma1::IteratorA iterator_A( Mma1::IteratorA::Params(ldm_A), ptr_A, {problem_size.m(), problem_size_k}, thread_idx, tb_offset_MxK); typename Mma1::IteratorB iterator_BT( Mma1::IteratorB::Params(ldm_B), ptr_B, {problem_size_k, problem_size.n()}, thread_idx, tb_offset_KxN); // Construct iterators to A and B operands for Mma2 typename Mma2::IteratorA iterator_B( Mma2::IteratorA::Params(ldm_B), ptr_B, {problem_size.m(), problem_size_k}, thread_idx, tb_offset_MxK); typename Mma2::IteratorB iterator_AT( Mma2::IteratorB::Params(ldm_A), ptr_A, {problem_size_k, problem_size.n()}, thread_idx, tb_offset_KxN); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. 
int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply for Mma1 (A x BT) Mma1 mma1(shared_storage.kernel.mma1_main_loop, thread_idx, warp_idx, lane_idx); // Construct thread-scoped matrix multiply for Mma2 (B x AT) Mma2 mma2(shared_storage.kernel.mma2_main_loop, thread_idx, warp_idx, lane_idx); typename Mma1::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK; // Wait for all threads to finish their epilogue phases from the previous tile. __syncthreads(); // Compute threadblock-scoped matrix multiply-add (A x BT) mma1( gemm_k_iterations, accumulators, iterator_A, iterator_BT, accumulators); // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. if (kBlasMode == BlasMode::kHermitian) { // // Epilogue // EpilogueOutputOp output_op(params.output_op); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C[problem_idx]); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D[problem_idx]); // If TB not on diagonal, FillMode doesn't apply. FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone; // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]), ptr_C, problem_size.mn(), thread_idx, tb_offset, kFillModeTB ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]), ptr_D, problem_size.mn(), thread_idx, tb_offset, kFillModeTB ); Epilogue epilogue( shared_storage.kernel.epilogue, thread_idx, warp_idx, lane_idx); // Execute the epilogue operator to update the destination tensor. epilogue( output_op, iterator_D, accumulators, iterator_C); __syncthreads(); accumulators.clear(); } // Compute threadblock-scoped matrix multiply-add (B x AT) mma2( gemm_k_iterations, accumulators, iterator_B, iterator_AT, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); /* Needed for HER2K where the second HERK is multiplied by conj(alpha) */ typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1); EpilogueOutputOp output_op_her2k(second_her2k_params); // // Masked tile iterators constructed from members // int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C[problem_idx]); // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. if (kBlasMode == BlasMode::kHermitian) { ptr_C = static_cast<ElementC *>(params.ptr_D[problem_idx]); } ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D[problem_idx]); // If TB not on diagonal, FillMode doesn't apply. FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone; // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]), ptr_C, problem_size.mn(), thread_idx, tb_offset, kFillModeTB ); // Tile iterator writing to destination tensor. 
typename Epilogue::OutputTileIterator iterator_D( Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]), ptr_D, problem_size.mn(), thread_idx, tb_offset, kFillModeTB ); Epilogue epilogue( shared_storage.kernel.epilogue, thread_idx, warp_idx, lane_idx); // Execute the epilogue operator to update the destination tensor. if (kBlasMode == BlasMode::kSymmetric) { epilogue( output_op, iterator_D, accumulators, iterator_C); } else { epilogue( output_op_her2k, iterator_D, accumulators, iterator_C); } // Next tile problem_visitor.advance(gridDim.x); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/rank_2k_grouped.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/rank_2k_grouped.h", "repo_id": "cutlass", "token_count": 9104 }
38
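A small stand-alone sketch of the tile-classification checks that operator() above performs before entering the main loop: threadblock tiles lying entirely outside the stored triangle of C are skipped, and tiles crossed by the main diagonal are flagged so the epilogue can apply the fill mode. TileM/TileN stand in for Mma1::Shape::kM/kN, and the sizes are illustrative only.

/////////////////////////////////////////////////////////////////////////////////////////////////
// Sketch: classifying threadblock tiles against the fill mode (illustrative only)
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <cstdio>

enum class FillMode { kLower, kUpper };
enum class TileKind { kSkip, kFull, kOnDiagonal };

TileKind classify_tile(FillMode fill, int tile_m, int tile_n, int TileM, int TileN) {
  int row_begin = tile_m * TileM, row_end = (tile_m + 1) * TileM;
  int col_begin = tile_n * TileN, col_end = (tile_n + 1) * TileN;

  // Entire tile above the diagonal while only the lower triangle is stored.
  if (fill == FillMode::kLower && row_end <= col_begin) return TileKind::kSkip;
  // Entire tile below the diagonal while only the upper triangle is stored.
  if (fill == FillMode::kUpper && row_begin >= col_end) return TileKind::kSkip;
  // The diagonal passes through the tile: the epilogue must mask with the fill mode.
  if (row_end > col_begin && row_begin < col_end) return TileKind::kOnDiagonal;
  return TileKind::kFull;
}

int main() {
  for (int m = 0; m < 3; ++m) {
    for (int n = 0; n < 3; ++n) {
      std::printf("tile (%d,%d): %d\n", m, n,
                  static_cast<int>(classify_tile(FillMode::kLower, m, n, 128, 128)));
    }
  }
  return 0;
}

With square 128x128 tiles and kLower fill mode, tiles below the diagonal are computed in full, tiles above it are skipped, and tiles with m == n are marked as diagonal-crossing, matching the three predicates in the kernel.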
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/barrier.h" #include "cutlass/block_striped.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" #include "cutlass/kernel_hardware_info.hpp" #include "cute/layout.hpp" #include "cute/tensor.hpp" namespace cutlass::gemm::kernel::detail { // Persistent Thread Block (TB) scheduler leveraging stream-K decomposition template < class TileShape, class ClusterShape > class PersistentTileSchedulerSm90StreamK { // // Data members // private: using UnderlyingScheduler = PersistentTileSchedulerSm90; private: using UnderlyingArguments = typename UnderlyingScheduler::Arguments; using UnderlyingParams = typename UnderlyingScheduler::Params; uint64_t current_work_linear_idx_ = 0; public: using RasterOrder = UnderlyingScheduler::RasterOrder; using RasterOrderOptions = UnderlyingScheduler::RasterOrderOptions; // Use a dummy barrier manager to simply get the type used to store the barrier using BarrierType = typename NamedBarrierManager<1>::T; using Params = PersistentTileSchedulerSm90StreamKParams; using ReductionMode = Params::ReductionMode; using DecompositionMode = Params::DecompositionMode; struct WorkTileInfo { int32_t M_idx = 0; int32_t N_idx = 0; int32_t K_idx = 0; int32_t L_idx = 0; // Number of k tiles to compute for this unit of work. For stream-K, this // can indicate the number of K tiles across multiple output tiles. 
uint32_t k_tile_count = 0; // Number of k tiles remaining for the work unit as a whole uint32_t k_tile_remaining = 0; // Whether this unit of work is the final split for the given tile bool is_separate_reduction = false; CUTLASS_HOST_DEVICE bool is_valid() const { // A work tile that computes no K tiles is invalid unless it is a separate-reduction work tile // (which only performs reduction and epilogue) return k_tile_count > 0 || is_separate_reduction; } CUTLASS_HOST_DEVICE bool is_reduction_unit() const { return is_separate_reduction; } CUTLASS_HOST_DEVICE int32_t reduction_subtile_idx() const { // For separate reduction units, the K_idx of the work tile is unused. // Therefore, we override it to contain the subtile of that the reduction // unit operates on. return is_reduction_unit() ? K_idx : -1; } CUTLASS_HOST_DEVICE void setup_separate_reduction(int32_t epilogue_subtile_idx) { // Set the epilogue subtile in the K_idx, since this is otherwise unused // by separate reduction units. K_idx = epilogue_subtile_idx; is_separate_reduction = true; k_tile_count = 0; // Clean up remaining k tiles k_tile_remaining = 0; } CUTLASS_HOST_DEVICE static WorkTileInfo invalid_work_tile() { return {-1, -1, -1, -1, 0}; } CUTLASS_HOST_DEVICE bool is_final_split(uint32_t k_tiles_per_output_tile) const { return (K_idx + k_tile_count) == k_tiles_per_output_tile; } }; struct Arguments { Arguments() = default; Arguments(Arguments const&) = default; Arguments(Arguments&&) = default; CUTLASS_HOST_DEVICE Arguments& operator=(Arguments const& args) { splits = args.splits; max_swizzle_size = args.max_swizzle_size; raster_order = args.raster_order; reduction_mode = args.reduction_mode; decomposition_mode = args.decomposition_mode; return *this; } CUTLASS_HOST_DEVICE Arguments& operator=(Arguments&& args) noexcept { splits = args.splits; max_swizzle_size = args.max_swizzle_size; raster_order = args.raster_order; reduction_mode = args.reduction_mode; decomposition_mode = args.decomposition_mode; return *this; } CUTLASS_HOST_DEVICE Arguments(int splits_) : splits(splits_) {} CUTLASS_HOST_DEVICE Arguments(int splits_, int max_swizzle_size_, RasterOrderOptions raster_order_, DecompositionMode decomposition_mode_) : splits(splits_), max_swizzle_size(max_swizzle_size_), raster_order(raster_order_), decomposition_mode(decomposition_mode_) {} // The splitting factor to be used in a split-K decomposition of the problem. // If this is set to a value greater than 1, stream-K decomposition logic // is bypassed in favor of a split-K decomposition. 
int splits = 1; int max_swizzle_size = 1; RasterOrderOptions raster_order = RasterOrderOptions::Heuristic; ReductionMode reduction_mode = ReductionMode::Deterministic; DecompositionMode decomposition_mode = DecompositionMode::Heuristic; }; // Sink scheduler params as a member Params scheduler_params; // // Methods // template <class ProblemShape> static Params to_underlying_arguments( ProblemShape problem_shape, TileShape tile_shape, ClusterShape cluster_shape, KernelHardwareInfo const& hw_info, Arguments const& args, void* workspace, const uint32_t epilogue_subtile = 1) { static_assert(cute::is_static<TileShape>::value); static_assert(cute::is_static<ClusterShape>::value); auto problem_shape_mnkl = cute::append<4>(problem_shape, cute::Int<1>{}); dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); Params params; params.initialize( problem_blocks, k_tile_per_output_tile, to_gemm_coord(cluster_shape), hw_info, args.splits, args.max_swizzle_size, args.raster_order, args.reduction_mode, args.decomposition_mode, workspace, epilogue_subtile ); return params; } CUTLASS_HOST_DEVICE static bool can_implement(Arguments const& args) { // Split count > 1 is only valid for heuristic and split-K decomposition modes return (args.splits == 1 || args.decomposition_mode == DecompositionMode::Heuristic || args.decomposition_mode == DecompositionMode::SplitK); } CUTLASS_HOST_DEVICE PersistentTileSchedulerSm90StreamK() { }; CUTLASS_HOST_DEVICE PersistentTileSchedulerSm90StreamK(Params const& params_) : scheduler_params(params_) { if (params_.raster_order_ == RasterOrder::AlongN) { current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x); } else { current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y); } } CUTLASS_DEVICE WorkTileInfo get_current_work() const { return get_current_work_for_linear_idx(current_work_linear_idx_, scheduler_params); } CUTLASS_DEVICE static WorkTileInfo get_current_work_for_linear_idx(uint64_t linear_idx, Params const& params) { // The maximum number of work units is units_per_problem_ * splits_. // The multiplication by splits_ is used for handling split-K, in which // units_per_problem_ is equal to the total number of output tiles. To account // for the fact that we have splits_ peers per output tile, we multiply this // value by splits_. For stream-K, this multiplication ends up being a no-op // because splits_ is set to 1 for stream-K. if(linear_idx >= (params.units_per_problem_ * params.splits_ + params.separate_reduction_units_)) { // Invalid work. Return an empty result. return WorkTileInfo::invalid_work_tile(); } WorkTileInfo work_tile_info; assign_work(params, linear_idx, work_tile_info); return work_tile_info; } // Returns whether the current work_tile_info passed in should continue to be used. This // occurs only in the stream-K decomposition with stream-K work units, which encompass // work over multiple output tiles. If the current work_tile_info should continue to be // used, it is updated to advance to the next output tile it should cover. 
CUTLASS_DEVICE bool continue_current_work(WorkTileInfo& work_tile_info) const { return continue_current_work_for_linear_idx( current_work_linear_idx_, work_tile_info, scheduler_params); } CUTLASS_DEVICE static bool continue_current_work_for_linear_idx( uint64_t linear_idx, WorkTileInfo& work_tile_info, Params const& params) { work_tile_info.k_tile_remaining -= work_tile_info.k_tile_count; if (work_tile_info.k_tile_remaining == 0) { return false; } assign_work(params, linear_idx, work_tile_info); return work_tile_info.is_valid(); } CUTLASS_DEVICE void advance_to_next_work(uint32_t advance_count = 1) { current_work_linear_idx_ += uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z) * uint64_t(advance_count); } // Given the inputs, computes the total number of output blocks this problem will compute over // Note that this is only the logical size of our grid, not the physical grid we will actually launch. template <class ProblemShape> CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(ProblemShape problem_shape_mnkl, TileShape cta_shape, ClusterShape cluster_shape) { return UnderlyingScheduler::get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); } // Given the cluster shape, computes the physical grid we should launch. template <class ProblemShape> CUTLASS_HOST_DEVICE static dim3 get_grid_shape( ProblemShape problem_shape, TileShape tile_shape, ClusterShape cluster_shape, KernelHardwareInfo hw_info, Arguments arguments) { auto problem_shape_mnkl = cute::append<4>(problem_shape, cute::Int<1>{}); dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); return Params::get_grid_shape( problem_blocks, to_gemm_coord(cluster_shape), hw_info, arguments.max_swizzle_size, arguments.raster_order ); } // Returns whether fixup is needed for `work_tile_info`. CUTLASS_HOST_DEVICE static bool requires_fixup(Params const& params, WorkTileInfo const& work_tile_info) { // Fixup is not needed for invalid or data-parallel tiles return work_tile_info.is_valid() && work_tile_info.k_tile_count != params.divmod_tiles_per_output_tile_.divisor; } CUTLASS_HOST_DEVICE static bool requires_separate_reduction(Params const& params) { return params.requires_separate_reduction(); } // When the work tile is not special for reduction, it's valid. Otherwise need to skip // global loading that producer warpgroup do, also math computation that consumer warpgroup do. CUTLASS_DEVICE static bool valid_warpgroup_in_work_tile(WorkTileInfo const& work_tile_info) { return !work_tile_info.is_reduction_unit(); } // Performs the reduction across splits for a given output tile. template <class FrgTensorC> CUTLASS_DEVICE static void fixup( Params const& params, WorkTileInfo const& work_tile_info, FrgTensorC& accumulators, uint32_t num_barriers, uint32_t barrier_idx) { static constexpr uint32_t Offset = static_cast<int>(cutlass::arch::ReservedNamedBarriers::StreamkBarrier0); static constexpr uint32_t MaxNumNamedBarriers = 2; using BarrierManager = NamedBarrierManager<NumThreadsPerWarpGroup, Offset, MaxNumNamedBarriers>; return fixup_helper<FrgTensorC, BarrierManager>( params, work_tile_info, accumulators, num_barriers, barrier_idx); } // Helper for performing the reduction across splits for a given output tile. 
template <class FrgTensorC, class BarrierManager> CUTLASS_DEVICE static void fixup_helper( Params const& params, WorkTileInfo const& work_tile_info, FrgTensorC& accumulators, uint32_t num_barriers, uint32_t barrier_idx, uint32_t num_accumulator_mtxs = 1) { using ElementAccumulator = typename FrgTensorC::value_type; if (!requires_fixup(params, work_tile_info)) { return; } auto tile_idx = output_tile_index(params, work_tile_info); // Index of the lock on which to wait auto lock_idx = (tile_idx * num_barriers) + barrier_idx; auto reduction_tile_idx = tile_idx; auto [first_peer_id, my_peer_id, last_peer_id] = tile_peer_range(params, tile_idx, static_cast<uint32_t>(work_tile_info.K_idx)); auto reduction_peer_offset = 0; if (params.requires_separate_reduction()) { // If separate reduction is to be performed, each stream-K unit writes its partials // to a separate portion of the workspace. There are as many of these portions as there // are peers for a given output tile, so we multiply the tile index by the maximum peer count. reduction_tile_idx *= Params::max_peers_per_tile(params.sk_units_, params.sk_tiles_); reduction_peer_offset = my_peer_id * cute::size<0>(TileShape{}) * cute::size<1>(TileShape{}); } // Reductions use BlockStripedReduce with a width of BarrierManager::ThreadCount under the hood. // Thus, the start of the reduction space is the same across all threads in a warp group. int reduction_offset = (cute::size<0>(TileShape{}) * cute::size<1>(TileShape{}) * reduction_tile_idx * num_accumulator_mtxs) + reduction_peer_offset + (size(accumulators) * barrier_idx * BarrierManager::ThreadCount); ElementAccumulator* group_reduction_workspace = reinterpret_cast<ElementAccumulator*>(params.reduction_workspace_) + reduction_offset; using AccumulatorArrayT = Array<typename FrgTensorC::value_type, size(FrgTensorC{})>; using BlockStripedReduceT = BlockStripedReduce<BarrierManager::ThreadCount, AccumulatorArrayT>; AccumulatorArrayT* reduction_workspace_array = reinterpret_cast<AccumulatorArrayT*>(group_reduction_workspace); AccumulatorArrayT* accumulator_array = reinterpret_cast<AccumulatorArrayT*>(&accumulators); int barrier_group_thread_idx = threadIdx.x % BarrierManager::ThreadCount; // The number of tiles for which reduction is required is either: // (a) the total number of output tiles (in the case of split-K) // (b) the number of stream-K tiles (potentially multiplied by peer count if using separate reduction) // To calculate the total number of output tiles in the split-K case, we // note that, in the split-K case, the units_per_problem_ member of Params will be // the total number of output tiles. uint32_t reduction_tiles = 0; if (params.splits_ > 1) { reduction_tiles = params.units_per_problem_; } else if (params.requires_separate_reduction()) { reduction_tiles = params.sk_tiles_ * Params::max_peers_per_tile(params.sk_units_, params.sk_tiles_); } else { reduction_tiles = params.sk_tiles_; } auto reduction_workspace_size = Params::get_reduction_workspace_size( reduction_tiles, to_gemm_coord(TileShape{}), sizeof_bits<ElementAccumulator>::value, num_accumulator_mtxs); BarrierType* lock_workspace = reinterpret_cast<BarrierType*>( reinterpret_cast<uint8_t*>(params.reduction_workspace_) + reduction_workspace_size); if (work_tile_info.is_reduction_unit()) { plus<AccumulatorArrayT> add_fragments; auto peer_offset = size(accumulators) * num_barriers * BarrierManager::ThreadCount; // Wait until the peers collaborating on this output tile have all written // their accumulators to workspace. 
uint32_t num_peers = last_peer_id - first_peer_id + 1; BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, num_peers); // Load the first peer's data BlockStripedReduceT::load(*accumulator_array, reduction_workspace_array, barrier_group_thread_idx); for (int i = 1; i < num_peers; ++i) { // Load peer fragment AccumulatorArrayT addend_fragment; auto peer_reduction_workspace = reinterpret_cast<AccumulatorArrayT*>(group_reduction_workspace + (i * peer_offset)); BlockStripedReduceT::load(addend_fragment, peer_reduction_workspace, barrier_group_thread_idx); // Add peer fragment *accumulator_array = add_fragments(*accumulator_array, addend_fragment); } } else if (!compute_epilogue(work_tile_info, params)) { if (params.requires_separate_reduction() || work_tile_info.K_idx == 0) { // The first peer initializes the workspace partials in the non-separate-reduction case, // and all peers write to their own location in workspace when using separate reduction BlockStripedReduceT::store(reduction_workspace_array, *accumulator_array, barrier_group_thread_idx); } else { // Wait until the preceding split added its accumulators BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.K_idx); // Perform reduction in workspace BlockStripedReduceT::reduce(reduction_workspace_array, *accumulator_array, barrier_group_thread_idx); } // If separate reduction is being performed, each participating stream-K unit increments the barrier // by only 1. Otherwise, increment by the K tile count that this unit has processed. int32_t increment = params.requires_separate_reduction() ? 1 : work_tile_info.k_tile_count; // Signal our arrival BarrierManager::arrive_inc(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, increment); } else { if (params.reduction_mode_ == ReductionMode::Deterministic) { // Wait until the preceding split added its accumulators BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.K_idx); } else { // Wait unitl the first split has stored its accumulators BarrierManager::wait_lt(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, 1); } // The block computing the final split for the tile adds previously-reduced partials // to its accumulators and computes the epilogue. BlockStripedReduceT::load_add(*accumulator_array, reduction_workspace_array, barrier_group_thread_idx); } } // Returns whether the block assigned this work should compute the epilogue for the corresponding // output tile. For the case of stream-K, this should only occur if the work is marked as the final split. CUTLASS_HOST_DEVICE static bool compute_epilogue(WorkTileInfo const& work_tile_info, Params const& params) { // `is_final_split` will be set to `true` for the following scenarios, all of which must compute the epilogue: // 1. The tile is computed in data-parallel mode // 2. The tile is computed in split-/stream-K mode and this work unit represents the final split of the tile // 3. 
The tile is computed in split-/stream-K mode and separate reduction is used, and this is a separate reduction unit return work_tile_info.is_valid() && (work_tile_info.is_final_split(params.divmod_tiles_per_output_tile_.divisor) && !params.requires_separate_reduction()) || work_tile_info.is_separate_reduction; } // Returns the linearized index of the output tile corresponding to the tile with offset [L, M, K] CUTLASS_DEVICE static int output_tile_index(Params const& params, WorkTileInfo const& work_tile_info) { uint64_t linear_idx_in_batch = UnderlyingScheduler::get_linear_idx_from_m_and_n( work_tile_info.M_idx, work_tile_info.N_idx, params.divmod_cluster_shape_major_, params.divmod_cluster_shape_minor_, params.divmod_cluster_blk_major_, params.log_swizzle_size_, params.raster_order_ ); uint64_t tiles_mn = params.divmod_batch_.divisor; return tiles_mn * work_tile_info.L_idx + linear_idx_in_batch; } template <class ProblemShape, class ElementAccumulator> static size_t get_workspace_size( Arguments const& args, ProblemShape problem_shape, KernelHardwareInfo const& hw_info, uint32_t mma_warp_groups, const uint32_t epilogue_subtile = 1) { auto problem_shape_mnkl = cute::append<4>(problem_shape, 1); ClusterShape cluster_shape; TileShape tile_shape; dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); return Params::get_workspace_size( problem_blocks, k_tile_per_output_tile, to_gemm_coord(tile_shape), to_gemm_coord(cluster_shape), hw_info, args.splits, args.max_swizzle_size, args.raster_order, args.decomposition_mode, mma_warp_groups, sizeof_bits<BarrierType>::value, sizeof_bits<ElementAccumulator>::value, epilogue_subtile ); } template <class ProblemShape, class ElementAccumulator> static cutlass::Status initialize_workspace( Arguments const& args, void* workspace, cudaStream_t stream, ProblemShape const& problem_shape, KernelHardwareInfo const& hw_info, uint32_t mma_warp_groups, const uint32_t epilogue_subtile = 1) { auto problem_shape_mnkl = cute::append<4>(problem_shape, 1); ClusterShape cluster_shape; TileShape tile_shape; dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); return Params::initialize_workspace( workspace, stream, problem_blocks, k_tile_per_output_tile, to_gemm_coord(tile_shape), to_gemm_coord(cluster_shape), hw_info, args.splits, args.max_swizzle_size, args.raster_order, args.decomposition_mode, mma_warp_groups, sizeof_bits<BarrierType>::value, sizeof_bits<ElementAccumulator>::value, epilogue_subtile ); } template <class ProblemShape> CUTLASS_HOST_DEVICE static int get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape, TileShape) { return work_tile_info.k_tile_count; } CUTLASS_HOST_DEVICE static uint32_t get_work_k_tile_start(WorkTileInfo const& work_tile_info) { return work_tile_info.K_idx; } private: // Sets the current stream-K work to compute within work_tile_info. If new_unit is true, work_tile_info // is populated as a new unit of work. Otherwise, state existing in work_tile_info (e.g., remaining // iterations) is used to find the next tile in the current work unit. 
CUTLASS_DEVICE static void assign_work( Params const& params, uint64_t linear_idx, WorkTileInfo& work_tile_info) { uint64_t output_tile_id = linear_idx; if (linear_idx >= params.units_per_problem_ * params.splits_) { // Separate-reduction work auto cluster_size = params.get_cluster_size(); // Divide up the linearized separate reduction units into clusters auto cluster_linear_reduction_unit_idx = params.div_cluster_size((linear_idx - params.units_per_problem_)); uint64_t cluster_tile_idx, epi_subtile_idx; params.divmod_epilogue_subtile_(cluster_tile_idx, epi_subtile_idx, cluster_linear_reduction_unit_idx); // Bring the linearized tile ID back into the space of tiles, rather than clusters output_tile_id = cluster_tile_idx * cluster_size; work_tile_info.setup_separate_reduction(epi_subtile_idx); } else if (linear_idx >= params.sk_units_ && params.splits_ == 1) { // Data-parallel work output_tile_id = linear_idx - params.sk_units_ + params.sk_tiles_; work_tile_info.K_idx = 0; work_tile_info.k_tile_count = params.divmod_tiles_per_output_tile_.divisor; work_tile_info.k_tile_remaining = params.divmod_tiles_per_output_tile_.divisor; } else { // In the CUTLASS 2.x implementation of stream K, stream-K work is assigned to each stream-K // threadblock individually. For the most part, the set of K iterations corresponding to stream-K // work was divided amongst stream-K threadblocks, and a threadblock determined which tile // it would compute a (potentially-partial) output tile for based on the space of k iterations // assigned to it. This often results in stream-K threadblocks processing tiles with different // offsets in the K dimension from one another. This can reduce locality, but is lmitied to the // (generally few) waves of threadblocks assigned to compute stream-K work. // // With the introduction of threadblock clusters, there is additional benefit to maintaining // locality in the K dimension: shared portions of operands can be multicasted to threadblocks // within a cluster. Thus, we would like to ensure that the assignment of stream-K work to // threadblocks respects the ability to perform multicasting. // // To do so, we divide up the linearized stream-K units into clusters and share the same K // offsets for work within clusters. auto cluster_linear_work_idx = params.div_cluster_size(linear_idx); uint64_t group_idx; params.divmod_sk_groups_(cluster_linear_work_idx, group_idx, cluster_linear_work_idx); // Determine whether we are in a "big group" that will process an additional // stream-K cluster tile. auto sk_cluster_tiles = params.div_cluster_size(params.sk_tiles_); auto sk_cluster_tiles_in_group = params.divmod_sk_groups_.divide(sk_cluster_tiles); if (group_idx < params.big_groups_) { ++sk_cluster_tiles_in_group; } // Determine whether we are in a "big unit" within the group, that will process // an additional K chunk in the group. auto sk_tiles_in_group = sk_cluster_tiles_in_group * params.get_cluster_size(); auto k_tiles_in_group = sk_tiles_in_group * params.divmod_tiles_per_output_tile_.divisor; auto k_tiles_per_unit_in_group = params.divmod_sk_units_per_group_.divide(k_tiles_in_group); auto big_units_in_group = params.div_cluster_size( k_tiles_in_group - (k_tiles_per_unit_in_group * params.divmod_sk_units_per_group_.divisor)); uint64_t split; params.divmod_clusters_mnl_(split, cluster_linear_work_idx, cluster_linear_work_idx); bool is_split_k = params.splits_ > 1; auto big_unit_cmp_lhs = is_split_k ? split : cluster_linear_work_idx; auto big_unit_cmp_rhs = is_split_k ? 
params.big_units_ : big_units_in_group; auto linear_idx_mult = is_split_k ? params.divmod_tiles_per_output_tile_.divisor : k_tiles_per_unit_in_group; auto k_tiles_per_split = is_split_k ? params.k_tiles_per_sk_unit_ : k_tiles_per_unit_in_group; // Determine the starting k iteration computed by this stream-K work unit uint32_t unit_iter_start = (linear_idx_mult * cluster_linear_work_idx) + (k_tiles_per_split * split); // Adjust the starting position and number of k iterations for "big units," which // compute one extra iteration. If there are any big units, they will be the first // in the linearized ID space. auto k_tiles_in_my_split = k_tiles_per_split; if (big_unit_cmp_lhs < big_unit_cmp_rhs) { // Since the "big units" are the first units in the linearized ID space, each // of the units preceding this big unit computed one extra iteration. Thus, // we must offset our start iteration by the number of units that precede // the current unit in the linearized ID space. unit_iter_start += big_unit_cmp_lhs; ++k_tiles_in_my_split; } else { // Increment by one for each of the big clusters (since all big units precede this unit) unit_iter_start += big_unit_cmp_rhs; } if (!is_split_k) { // Adjust the unit starting position and number of tiles to avoid // computing splits of size less than min_iters_per_sk_unit_ int unused, start_tile_k_tile; params.divmod_tiles_per_output_tile_(unused, start_tile_k_tile, unit_iter_start); if (start_tile_k_tile < Params::min_iters_per_sk_unit_) { // Starting K tile is in range [0, Params::min_iters_per_sk_unit_), which means that another // stream-K unit will be computing a split with fewer than Params::min_iters_per_sk_unit_ K tiles. // Adjust our work to take over these K tiles. unit_iter_start -= start_tile_k_tile; k_tiles_in_my_split += start_tile_k_tile; } else if (start_tile_k_tile > (params.divmod_tiles_per_output_tile_.divisor - Params::min_iters_per_sk_unit_)) { // Starting K tile is within the final Params::min_iters_per_sk_unit_ K tiles of some output tile, // which means that this unit will compute a split with fewer than Params::min_iters_per_sk_unit_ K tiles. // Adjust our work to shed these K tiles to a neighboring stream-K unit that will compute more consecutive K tiles. auto adjustment_tiles = (params.divmod_tiles_per_output_tile_.divisor - start_tile_k_tile); unit_iter_start += adjustment_tiles; k_tiles_in_my_split -= adjustment_tiles; } } if (work_tile_info.k_tile_count == 0) { // This is a new unit if (!is_split_k) { // // Adjust the unit ending position and number of tiles to avoid // computing splits of size less than min_iters_per_sk_unit_ // // Begin by assuming that no adjustment is needed auto initial_unit_iter_end = unit_iter_start + k_tiles_in_my_split; int unused, end_tile_k_tile; params.divmod_tiles_per_output_tile_(unused, end_tile_k_tile, initial_unit_iter_end); if (end_tile_k_tile < Params::min_iters_per_sk_unit_) { // Ending K tile is within the first Params::min_iters_per_sk_unit_ K tiles of some output tile, // which means that this unit will compute a split with fewer than Params::min_iters_per_sk_unit_ K tiles. // Adjust our work to shed these K tiles to a neighboring stream-K unit that will compute more consecutive K tiles. 
k_tiles_in_my_split -= end_tile_k_tile; } else if (end_tile_k_tile > (params.divmod_tiles_per_output_tile_.divisor - Params::min_iters_per_sk_unit_)) { // Ending K tile is within the final Params::min_iters_per_sk_unit_ K tiles of some output tile, // which means that some other unit will compute a split with fewer than Params::min_iters_per_sk_unit_ K tiles. // Adjust our work to take on these K tiles. k_tiles_in_my_split += (params.divmod_tiles_per_output_tile_.divisor - end_tile_k_tile); } } work_tile_info.k_tile_remaining = k_tiles_in_my_split; } uint32_t unit_iter_end = unit_iter_start + work_tile_info.k_tile_remaining - 1; // Find the output tile corresponding to the final k tile covered by this // work unit. Stream-K work units will work backwards in terms of the tiles they // are responsible computing. This is beneficial because the final (partial) // tile computed by a stream-K block is typically the beginning of the output // tile, while the beginning (partial) tile is typically the ending of another // output tile. Since ending portions of an output tile must reduce across // other work units computing portions of that output tile, it is preferable // for them to be computed later, so as to reduce the likelihood of blocking // on other work. auto output_tile_id_in_group = params.divmod_tiles_per_output_tile_.divide(unit_iter_end); uint32_t output_tile_iter_start = output_tile_id_in_group * params.divmod_tiles_per_output_tile_.divisor; uint32_t output_tile_iter_end = output_tile_iter_start + params.divmod_tiles_per_output_tile_.divisor; // Convert the output tile from the linearized space within each group to the // overall linearized space. output_tile_id = (output_tile_id_in_group * params.divmod_sk_groups_.divisor) + group_idx; // Bring the linearized tile ID back into the space of tiles, rather than clusters output_tile_id *= params.get_cluster_size(); auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster(); // The final linearized tile ID is in units of the cluster dimension over which we rasterize. if (params.raster_order_ == RasterOrder::AlongN) { output_tile_id += cta_n_in_cluster * params.divmod_cluster_shape_minor_.divisor; } else { output_tile_id += cta_m_in_cluster * params.divmod_cluster_shape_minor_.divisor; } // The unit's starting k iteration in the current tile is either the starting // iteration for the tile as a whole, or the starting k iteration for the unit // as a whole (if the latter is greater than the former). uint32_t tile_iter_start = max(output_tile_iter_start, unit_iter_start); // Similarly, the unit's ending k iteration (exclusive) is either the end of // the current tile it is assigned, or the ending iteration of the unit as a whole // (if the latter is less than the former). 
uint32_t tile_iter_end = min(output_tile_iter_end, unit_iter_end + 1); // Set the k offset to be the starting k tile for this output tile work_tile_info.K_idx = static_cast<int32_t>(tile_iter_start - output_tile_iter_start); work_tile_info.k_tile_count = tile_iter_end - tile_iter_start; } uint64_t work_idx_l, remainder; params.divmod_batch_(work_idx_l, remainder, output_tile_id); uint64_t cta_per_grid_dim = params.divmod_cluster_shape_minor_.divide(remainder); auto [work_idx_m, work_idx_n] = UnderlyingScheduler::get_work_idx_m_and_n( cta_per_grid_dim, params.divmod_cluster_shape_major_, params.divmod_cluster_shape_minor_, params.divmod_cluster_blk_major_, params.log_swizzle_size_, params.raster_order_ ); // Set the M, N, and L block offsets work_tile_info.M_idx = work_idx_m; work_tile_info.N_idx = work_idx_n; work_tile_info.L_idx = static_cast<int32_t>(work_idx_l); } // Returns the starting and ending peer ID of this tile CUTLASS_HOST_DEVICE static auto tile_peer_range(Params const& params, uint32_t tile_idx, uint32_t cur_k_tile) { auto tile_idx_in_cluster_path = params.div_cluster_size(tile_idx); auto start_k_tile = params.divmod_tiles_per_output_tile_.divisor * tile_idx_in_cluster_path; auto end_k_tile = start_k_tile + params.divmod_tiles_per_output_tile_.divisor - 1; auto big_unit_k_tiles = params.big_units_ * (params.k_tiles_per_sk_unit_ + 1); auto adjust_unit = [&](uint32_t k_tile, uint32_t unit_idx, uint32_t k_tiles_per_unit) { auto unit_k_start = unit_idx * k_tiles_per_unit; auto unit_k_end = unit_k_start + k_tiles_per_unit; if (k_tile - start_k_tile < Params::min_iters_per_sk_unit_ && unit_k_end - start_k_tile < Params::min_iters_per_sk_unit_) { // k_tile is within the first min_iters_per_sk_unit_ K tiles of this output tile, // and the stream-K unit computes fewer than min_iters_per_sk_unit_ K tiles for this // output tile. This work will thus be subsumed by the next stream-K unit. ++unit_idx; } if (end_k_tile + 1 - k_tile < Params::min_iters_per_sk_unit_ && end_k_tile + 1 - unit_k_start < Params::min_iters_per_sk_unit_) { // k_tile is within the last min_iters_per_sk_unit_ K tiles of this output tile, // and the stream-K unit computes fewer than min_iters_per_sk_unit_ K tiles for this // output tile. This work will thus be subsumed by the previous stream-K unit. --unit_idx; } return unit_idx; }; // Lambda to find the ID of the stream-K unit that computes this K tile auto find_unit = [&](uint32_t k_tile) { if (k_tile < big_unit_k_tiles) { // The tile is within the "big unit range" auto k_tiles_per_unit = params.k_tiles_per_sk_unit_ + 1; auto unit_idx = k_tile / k_tiles_per_unit; return static_cast<uint64_t>(adjust_unit(k_tile, unit_idx, k_tiles_per_unit)); } else { // The tile is after the "big unit range." Account for this by finding the "normal unit" // that it belongs to, and then offsetting by the number of big units auto k_tiles_per_unit = params.k_tiles_per_sk_unit_; auto unit_idx = ((k_tile - big_unit_k_tiles) / params.k_tiles_per_sk_unit_) + (params.big_units_); return static_cast<uint64_t>(adjust_unit(k_tile, unit_idx, k_tiles_per_unit)); } }; return cute::make_tuple(find_unit(start_k_tile), find_unit(cur_k_tile), find_unit(end_k_tile)); } }; } // namespace cutlass::gemm::kernel::detail
cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp", "repo_id": "cutlass", "token_count": 14895 }
39
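/***************************************************************************************************
 * Editor's note: the following standalone sketch is illustrative and is not part of the CUTLASS
 * sources above. It mirrors, in simplified host-side C++, the arithmetic the stream-K scheduler
 * uses to hand each stream-K unit a contiguous range of K tiles: the first `big_units` units each
 * take one extra K tile, and the owning unit of any K tile is recovered with the same division.
 * The function names and the parameter values in main() are hypothetical and exist only to make
 * the mapping concrete; the min_iters_per_sk_unit_ boundary adjustment (adjust_unit) is omitted.
 **************************************************************************************************/
#include <cstdint>
#include <cstdio>

// Inclusive-start / exclusive-end range of K tiles covered by one stream-K unit.
struct KTileRange { uint32_t start; uint32_t end; };

// Range of K tiles assigned to unit_idx when the first big_units units take (k_tiles_per_unit + 1).
KTileRange unit_k_range(uint32_t unit_idx, uint32_t k_tiles_per_unit, uint32_t big_units) {
  uint32_t big_span = big_units * (k_tiles_per_unit + 1);
  if (unit_idx < big_units) {
    uint32_t start = unit_idx * (k_tiles_per_unit + 1);
    return {start, start + k_tiles_per_unit + 1};
  }
  uint32_t start = big_span + (unit_idx - big_units) * k_tiles_per_unit;
  return {start, start + k_tiles_per_unit};
}

// Inverse mapping: which stream-K unit owns a given linearized K tile.
uint32_t owning_unit(uint32_t k_tile, uint32_t k_tiles_per_unit, uint32_t big_units) {
  uint32_t big_span = big_units * (k_tiles_per_unit + 1);
  if (k_tile < big_span) {
    return k_tile / (k_tiles_per_unit + 1);                      // inside the "big unit" region
  }
  return big_units + (k_tile - big_span) / k_tiles_per_unit;     // regular units afterwards
}

int main() {
  // Hypothetical configuration: 4 stream-K units, 5 K tiles per regular unit, 2 "big" units.
  uint32_t k_tiles_per_unit = 5, big_units = 2, num_units = 4;
  for (uint32_t u = 0; u < num_units; ++u) {
    KTileRange r = unit_k_range(u, k_tiles_per_unit, big_units);
    std::printf("unit %u covers K tiles [%u, %u)\n", u, r.start, r.end);
  }
  // Round-trip check: every K tile maps back to the unit whose range contains it.
  for (uint32_t k = 0; k < 22; ++k) {
    std::printf("K tile %2u -> unit %u\n", k, owning_unit(k, k_tiles_per_unit, big_units));
  }
  return 0;
}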
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting TensorOp instructions. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" #include "cutlass/gemm/warp/mma_simt_policy.h" #include "cutlass/gemm/warp/mma_simt.h" #include "cutlass/gemm/warp/default_mma_complex_tensor_op.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" #include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" #include "cutlass/gemm/threadblock/mma_multistage.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex double-precision /// /// A: column-major /// B: row-major /// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, InstructionShape_, complex<double>, layout::ColumnMajor, complex<double>, layout::RowMajor, complex<double>, LayoutC_, arch::OpClassTensorOp, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = complex<double>; using LayoutA = layout::ColumnMajor; using ElementB = complex<double>; using LayoutB = layout::RowMajor; using ElementC = complex<double>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = 
warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped 128 static int const kAccessSizeInBits = 128; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous128b; using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous128b; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, kTransformA, kTransformB, Operator>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for complex double-precision /// /// A: column-major /// B: row-major /// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, InstructionShape_, complex<double>, layout::ColumnMajor, complex<double>, layout::ColumnMajor, complex<double>, LayoutC_, arch::OpClassTensorOp, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = complex<double>; using LayoutA = layout::ColumnMajor; using ElementB = complex<double>; using LayoutB = layout::ColumnMajor; using ElementC = complex<double>; using LayoutC = LayoutC_; static int const kStages = Stages; using Operator = Operator_; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; static cutlass::arch::CacheOperation::Kind const kCacheOpA = 
cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped 128 static int const kAccessSizeInBits = 128; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous128b; using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, kTransformA, kTransformB, Operator>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex double-precision /// /// A: row-major /// B: column-major /// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, InstructionShape_, complex<double>, layout::RowMajor, 
complex<double>, layout::ColumnMajor, complex<double>, LayoutC_, arch::OpClassTensorOp, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = complex<double>; using LayoutA = layout::RowMajor; using ElementB = complex<double>; using LayoutB = layout::ColumnMajor; using ElementC = complex<double>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped 128 static int const kAccessSizeInBits = 128; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise128x4; using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, kTransformA, kTransformB, Operator>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for complex double-precision /// /// A: row-major /// B: row-major /// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, 
/// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, InstructionShape_, complex<double>, layout::RowMajor, complex<double>, layout::RowMajor, complex<double>, LayoutC_, arch::OpClassTensorOp, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = complex<double>; using LayoutA = layout::RowMajor; using ElementB = complex<double>; using LayoutB = layout::RowMajor; using ElementC = complex<double>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped 128 static int const kAccessSizeInBits = 128; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise128x4; using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous128b; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, kTransformA, kTransformB, Operator>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, 
MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex floating-point /// /// A: column-major /// B: column-major /// Operator: arch::OpMultiplyAddComplex /// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, GemmShape<16, 8, 8>, complex<float>, layout::ColumnMajor, complex<float>, layout::ColumnMajor, complex<float>, LayoutC_, arch::OpClassTensorOp, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<16, 8, 8>; using ElementA = complex<float>; using LayoutA = layout::ColumnMajor; using ElementB = complex<float>; using LayoutB = layout::ColumnMajor; using ElementC = complex<float>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped static int const kAccessSizeInBits = 64; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b; using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits 
/ sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, kTransformA, kTransformB, Operator>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for complex floating-point /// /// A: column-major /// B: row-major /// Operator: arch::OpMultiplyAddComplex /// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, GemmShape<16, 8, 8>, complex<float>, layout::ColumnMajor, complex<float>, layout::RowMajor, complex<float>, LayoutC_, arch::OpClassTensorOp, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<16, 8, 8>; using ElementA = complex<float>; using LayoutA = layout::ColumnMajor; using ElementB = complex<float>; using LayoutB = layout::RowMajor; using ElementC = complex<float>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped static int const kAccessSizeInBits = 64; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b; using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, 
kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, kTransformA, kTransformB, Operator>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex floating-point /// /// A: row-major /// B: column-major /// Operator: arch::OpMultiplyAddComplex /// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, GemmShape<16, 8, 8>, complex<float>, layout::RowMajor, complex<float>, layout::ColumnMajor, complex<float>, LayoutC_, arch::OpClassTensorOp, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<16, 8, 8>; using ElementA = complex<float>; using LayoutA = layout::RowMajor; using ElementB = complex<float>; using LayoutB = layout::ColumnMajor; using ElementC = complex<float>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize 
= warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped static int const kAccessSizeInBits = 64; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, kTransformA, kTransformB, Operator>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex floating-point /// /// A: row-major /// B: row-major /// Operator: arch::OpMultiplyAddComplex /// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, GemmShape<16, 8, 8>, complex<float>, layout::RowMajor, complex<float>, layout::RowMajor, complex<float>, LayoutC_, arch::OpClassTensorOp, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<16, 8, 8>; using ElementA = complex<float>; using LayoutA = layout::RowMajor; using ElementB = complex<float>; using LayoutB = layout::RowMajor; using ElementC = complex<float>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; 
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped static int const kAccessSizeInBits = 64; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, kTransformA, kTransformB, Operator>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex SIMT operation /// /// A: column-major /// B: column-major /// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, typename RealA, typename RealB, typename RealC, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, GemmShape<1, 1, 1>, complex<RealA>, layout::ColumnMajor, complex<RealB>, layout::ColumnMajor, complex<RealC>, LayoutC_, arch::OpClassSimt, Stages, 
TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = complex<RealA>; using LayoutA = layout::ColumnMajor; using ElementB = complex<RealB>; using LayoutB = layout::ColumnMajor; using ElementC = complex<RealC>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of access static int const kAccessSizeInBits = sizeof_bits<ElementA>::value; /// No vectorized accesses static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator B using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) 1, /// 1 partition along K dimension kTransformA, /// Transform for A kTransformB /// Transform for B >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, Shape::kK / 32>, WarpCount::kK>; }; /// Partial specialization for complex SIMT operation /// /// A: column-major /// B: row-major /// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, typename RealA, typename RealB, typename RealC, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, GemmShape<1, 1, 1>, complex<RealA>, layout::ColumnMajor, complex<RealB>, layout::RowMajor, complex<RealC>, LayoutC_, arch::OpClassSimt, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = complex<RealA>; using LayoutA = layout::ColumnMajor; using ElementB = complex<RealB>; using LayoutB = layout::RowMajor; using ElementC = complex<RealC>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped 
GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of access static int const kAccessSizeInBits = sizeof_bits<ElementA>::value; /// No vectorized accesses static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) 1, /// 1 partition along K dimension kTransformA, /// Transform for A kTransformB /// Transform for B >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, 0>, // or Shape::kK / 32 WarpCount::kK>; }; /// Partial specialization for complex SIMT operation /// /// A: row-major /// B: column-major /// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, typename RealA, typename RealB, typename RealC, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, GemmShape<1, 1, 1>, complex<RealA>, layout::RowMajor, complex<RealB>, layout::ColumnMajor, complex<RealC>, LayoutC_, arch::OpClassSimt, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = complex<RealA>; using LayoutA = layout::RowMajor; using ElementB = complex<RealB>; using LayoutB = layout::ColumnMajor; using ElementC = complex<RealC>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), 
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of access static int const kAccessSizeInBits = sizeof_bits<ElementA>::value; /// No vectorized accesses static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator B using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) 1, /// 1 partition along K dimension kTransformA, /// Transform for A kTransformB /// Transform for B >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<Shape::kK / 32, 0>, MatrixShape<0, Shape::kK / 32>, WarpCount::kK>; }; /// Partial specialization for complex SIMT operation /// /// A: row-major /// B: row-major /// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, typename RealA, typename RealB, typename RealC, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Complex transformation on operand A ComplexTransform TransformA, /// Complex transformation on operand B ComplexTransform TransformB, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMultistageMmaComplexCore< Shape_, WarpShape_, GemmShape<1, 1, 1>, complex<RealA>, layout::RowMajor, complex<RealB>, layout::RowMajor, complex<RealC>, LayoutC_, arch::OpClassSimt, Stages, TransformA, TransformB, Operator_, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = complex<RealA>; using LayoutA = layout::RowMajor; using ElementB = complex<RealB>; using LayoutB = layout::RowMajor; using ElementC = complex<RealC>; using LayoutC = LayoutC_; static int const kStages = Stages; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Operator = Operator_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), 
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of access static int const kAccessSizeInBits = sizeof_bits<ElementA>::value; /// No vectorized accesses static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) 1, /// 1 partition along K dimension kTransformA, /// Transform for A kTransformB /// Transform for B >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<Shape::kK / 32, 0>, MatrixShape<0, 0>, // or Shape::kK / 32 WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
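/////////////////////////////////////////////////////////////////////////////////////////////////
// A standalone sketch (plain C++, not part of CUTLASS) that mirrors the arithmetic the SIMT
// specializations above use to derive their warp-level MMA policy. The 32x64 warp tile and the
// 64-bit complex<float> element widths are assumptions chosen for illustration; the library
// computes the same quantities from its template arguments via cutlass::const_min and
// sizeof_bits.
#include <cstdio>

namespace sketch {

constexpr int const_min(int a, int b) { return a < b ? a : b; }  // stands in for cutlass::const_min

// Assumed warp tile and element widths (sizeof_bits<complex<float>>::value == 64).
constexpr int kWarpShapeM = 32, kWarpShapeN = 64;
constexpr int kElementBitsA = 64, kElementBitsB = 64;

// Fixed 4x8 arrangement of the 32 lanes, as in the specializations above.
constexpr int WarpNumThreadsM = 4, WarpNumThreadsN = 8;
constexpr int ThreadTileM = kWarpShapeM / WarpNumThreadsM;                   // 8
constexpr int ThreadTileN = kWarpShapeN / WarpNumThreadsN;                   // 8
constexpr int LaneLayout  = (ThreadTileM > 4 && ThreadTileN > 4) ? 2 : 1;    // 2

// A 128-bit access bounds how many elements one lane multiplies per LaneMmaShape.
constexpr int numElementsA = 128 / kElementBitsA;                            // 2
constexpr int numElementsB = 128 / kElementBitsB;                            // 2
constexpr int LaneM = const_min(numElementsA, ThreadTileM);                  // 2
constexpr int LaneN = const_min(numElementsB, ThreadTileN);                  // 2

static_assert(WarpNumThreadsM * WarpNumThreadsN == 32, "exactly one warp of lanes");

}  // namespace sketch

int main() {
  std::printf("ThreadTile %dx%d, LaneMmaShape <%d,%d,1>, LaneLayout interleave %d\n",
              sketch::ThreadTileM, sketch::ThreadTileN, sketch::LaneM, sketch::LaneN,
              sketch::LaneLayout);
  return 0;
}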
cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h", "repo_id": "cutlass", "token_count": 22422 }
40
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/aligned_buffer.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Used for partial specialization typename Enable = bool > class MmaSingleStage : public MmaBase<Shape_, Policy_, 1> { public: ///< Base class using Base = MmaBase<Shape_, Policy_, 1>; using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory using ElementC = ElementC_; ///< Data type of accumulator matrix using LayoutC = LayoutC_; ///< Layout of accumulator matrix using Policy = Policy_; ///< Policy describing tuning details using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; // // Dependent types // /// Fragment of operand A loaded from global memory using FragmentA = typename IteratorA::Fragment; /// Fragment of operand B loaded from global memory using FragmentB = typename IteratorB::Fragment; /// Fragment of accumulator tile using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; using ArchTag = arch::Sm70; /// Complex transform on A operand static ComplexTransform const kTransformA = Operator::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = Operator::kTransformB; // staticaly assert kStages for MmaSingleStage is 1 (single stage mma pipeline) static_assert((Base::kStages==1), "MmaSingleStage requires kStages set to value 1"); private: using WarpFragmentA = typename Operator::FragmentA; using WarpFragmentB = typename Operator::FragmentB; protected: /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE MmaSingleStage( typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM int thread_idx, ///< ID within the threadblock int warp_idx, ///< ID of warp int lane_idx ///< ID of each thread within a warp ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * 
Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( int gemm_k_iterations, ///< number of iterations of the mainloop FragmentC &accum, ///< destination accumulator tile IteratorA iterator_A, ///< iterator over A operand in global memory IteratorB iterator_B, ///< iterator over B operand in global memory FragmentC const &src_accum) { ///< source accumualtor tile // // Prologue // // Perform accumulation in the 'd' output operand accum = src_accum; FragmentA tb_frag_A; FragmentB tb_frag_B; tb_frag_A.clear(); tb_frag_B.clear(); // The last kblock is loaded in the prolog iterator_A.load(tb_frag_A); iterator_B.load(tb_frag_B); ++iterator_A; ++iterator_B; // Pair of fragments used to overlap shared memory loads and math instructions WarpFragmentA warp_frag_A; WarpFragmentB warp_frag_B; Operator warp_mma; // Avoid reading out of bounds iterator_A.clear_mask(gemm_k_iterations <= 1); iterator_B.clear_mask(gemm_k_iterations <= 1); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > 0; --gemm_k_iterations) { this->smem_iterator_A_.store(tb_frag_A); this->smem_iterator_B_.store(tb_frag_B); __syncthreads(); // // Loop over GEMM K dimension // CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group // as the case may be. this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_frag_A); this->warp_tile_iterator_B_.load(warp_frag_B); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; warp_mma(accum, warp_frag_A, warp_frag_B, accum); } // Add negative offsets to return smem load iterators to the 'start' of the shared memory this->warp_tile_iterator_A_.add_tile_offset({0, -Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset({-Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); __syncthreads(); iterator_A.load(tb_frag_A); iterator_B.load(tb_frag_B); ++iterator_A; ++iterator_B; // Avoid reading out of bounds if this was the last loop iteration iterator_A.clear_mask(gemm_k_iterations <= 2); iterator_B.clear_mask(gemm_k_iterations <= 2); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
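/////////////////////////////////////////////////////////////////////////////////////////////////
// A standalone illustration (plain C++, not part of CUTLASS) of the warp_idx -> (m, n, k)
// mapping computed in the MmaSingleStage constructor above. The 2x2x2 WarpCount is an assumed
// example; CUTLASS derives the counts from the threadblock and warp shapes.
#include <cstdio>

int main() {
  constexpr int kWarpCountM = 2, kWarpCountN = 2, kWarpCountK = 2;  // assumed WarpCount
  for (int warp_idx = 0; warp_idx < kWarpCountM * kWarpCountN * kWarpCountK; ++warp_idx) {
    int warp_idx_mn = warp_idx % (kWarpCountM * kWarpCountN);  // position within one k-slice
    int warp_idx_k  = warp_idx / (kWarpCountM * kWarpCountN);  // which k-slice
    int warp_idx_m  = warp_idx_mn % kWarpCountM;               // row of warps
    int warp_idx_n  = warp_idx_mn / kWarpCountM;               // column of warps
    std::printf("warp %d -> m=%d n=%d k=%d\n", warp_idx, warp_idx_m, warp_idx_n, warp_idx_k);
  }
  return 0;
}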
cutlass/include/cutlass/gemm/threadblock/mma_singlestage.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/mma_singlestage.h", "repo_id": "cutlass", "token_count": 3396 }
41
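/////////////////////////////////////////////////////////////////////////////////////////////////
// A standalone, single-threaded sketch (plain C++, no CUDA, not a CUTLASS API) of the control
// flow the MmaSingleStage mainloop above follows: with only one buffer, each k-tile is staged
// and then fully consumed before the next k-tile is touched. Matrix sizes, tile size, and
// function names here are illustrative assumptions; the fragment/shared-memory distinction and
// the prefetch of the next tile after __syncthreads() are collapsed into a single staging copy.
#include <algorithm>
#include <cstdio>
#include <vector>

// C (MxN, row-major) += A (MxK, row-major) * B (KxN, row-major), walking K in tiles of kTileK.
void gemm_single_stage(int M, int N, int K, int kTileK,
                       float const *A, float const *B, float *C) {
  std::vector<float> stage_A(static_cast<size_t>(M) * kTileK);   // stands in for the smem tile of A
  std::vector<float> stage_B(static_cast<size_t>(kTileK) * N);   // stands in for the smem tile of B

  for (int k0 = 0; k0 < K; k0 += kTileK) {                       // plays the role of gemm_k_iterations
    int kt = std::min(kTileK, K - k0);

    // "Store to shared memory": copy the current k-tile into the staging buffers.
    for (int i = 0; i < M; ++i)
      for (int k = 0; k < kt; ++k)
        stage_A[i * kTileK + k] = A[i * K + (k0 + k)];
    for (int k = 0; k < kt; ++k)
      for (int j = 0; j < N; ++j)
        stage_B[k * N + j] = B[(k0 + k) * N + j];

    // "Warp-level MMA": accumulate the staged tile into C before moving to the next tile.
    for (int i = 0; i < M; ++i)
      for (int j = 0; j < N; ++j) {
        float acc = C[i * N + j];
        for (int k = 0; k < kt; ++k)
          acc += stage_A[i * kTileK + k] * stage_B[k * N + j];
        C[i * N + j] = acc;
      }
  }
}

int main() {
  // 2x3 times 3x2 worked example; C starts at the "source accumulator" values (zeros here).
  float A[] = {1, 2, 3, 4, 5, 6};
  float B[] = {7, 8, 9, 10, 11, 12};
  float C[] = {0, 0, 0, 0};
  gemm_single_stage(2, 2, 3, 2, A, B, C);
  std::printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);  // expect: 58 64 / 139 154
  return 0;
}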
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing warp-level matrix multiply-accumulate operations targeting Tensor Cores. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/complex.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/functional.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/arch/mma_sm75.h" #include "cutlass/arch/mma_sm80.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/warp/mma_tensor_op_policy.h" #include "cutlass/gemm/warp/mma_tensor_op.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" #include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { namespace detail { template < /// Data type of real & imag members of complex numbers in the SourceFragment typename RealElement, /// Destination fragment required by the mma operation typename DestinationFragment, /// Source fragment holding complex<RealElement> elements typename SourceFragment, /// Number of mma operations performed typename MmaIterations, /// Shape of operand elements typename MmaOperandShape, /// Complex transform on A operand ComplexTransform Transform_, /// Operand A or Operand B Operand Operand_, /// Floating-point rounding style for big part FloatRoundStyle RoundBig_, /// Floating-point rounding style for small part FloatRoundStyle RoundSmall_> struct UnpackComplexConvertAndPackForMmaFastF32; // Partial specialization for OperandA and Congruous smem layout template < typename RealElement, typename DestinationFragment, typename SourceFragment, typename MmaIterations, typename MmaOperandShape, ComplexTransform Transform_, FloatRoundStyle RoundBig_, FloatRoundStyle RoundSmall_> struct UnpackComplexConvertAndPackForMmaFastF32 < RealElement, DestinationFragment, SourceFragment, MmaIterations, MmaOperandShape, Transform_, Operand::kA, RoundBig_, RoundSmall_> { // // Type definitions // static Operand const kOperand = Operand::kA; static ComplexTransform const kTransform = Transform_; static FloatRoundStyle const kRoundBig = RoundBig_; static FloatRoundStyle const kRoundSmall = RoundSmall_; // Data type of elements in the destination fragment using MmaElement = typename DestinationFragment::Element; // Numeric convertor MmaElementBig, MmaElementSmall <= RealElement using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>; // Operand layout parameters using SourceFragmentLayout = layout::ColumnMajor; static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow; // BigSmall Fragment holding two TF32 elements (big, small) for every float using BigSmallFragment = Array<MmaElement, 2>; /// Index in fargments for the big and small part static int const kBigIndex = 0; static int const kSmallIndex = 1; /// Ctor CUTLASS_DEVICE UnpackComplexConvertAndPackForMmaFastF32() {} CUTLASS_DEVICE void operator()(DestinationFragment *dest, SourceFragment const &source) { Converter convert_op; SourceFragmentLayout layout(kLdm); DestinationFragment *dest_big_ = reinterpret_cast<DestinationFragment*>(dest); DestinationFragment *dest_small_ = reinterpret_cast<DestinationFragment*>(&dest[MmaIterations::kRow * 2]); CUTLASS_PRAGMA_UNROLL for(int i=0; i<MmaIterations::kRow; i++) { int pos = 0; CUTLASS_PRAGMA_UNROLL for(int c=0; c<MmaOperandShape::kColumn; c++) { CUTLASS_PRAGMA_UNROLL for(int r=0; r<MmaOperandShape::kRow; r++) { // Logical position of element in source fragment 
int row = r + i * MmaOperandShape::kRow; int col = c; // Access complex<RealElement> and apply rounding on real and imag parts BigSmallFragment a = convert_op(source[layout(MatrixCoord{row,col})].real()); BigSmallFragment b = convert_op(source[layout(MatrixCoord{row,col})].imag()); // Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation dest_big_[i][pos] = a[kBigIndex]; dest_big_[i+MmaIterations::kRow][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kBigIndex] : b[kBigIndex]); // Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation dest_small_[i][pos] = a[kSmallIndex]; dest_small_[i+MmaIterations::kRow][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kSmallIndex] : b[kSmallIndex]); // Next position pos++; } } } } }; // Partial specialization for OperandB and Congruous smem layout template < typename RealElement, typename DestinationFragment, typename SourceFragment, typename MmaIterations, typename MmaOperandShape, ComplexTransform Transform_, FloatRoundStyle RoundBig_, FloatRoundStyle RoundSmall_> struct UnpackComplexConvertAndPackForMmaFastF32 < RealElement, DestinationFragment, SourceFragment, MmaIterations, MmaOperandShape, Transform_, Operand::kB, RoundBig_, RoundSmall_> { // // Type definitions // static Operand const kOperand = Operand::kB; static ComplexTransform const kTransform = Transform_; static FloatRoundStyle const kRoundBig = RoundBig_; static FloatRoundStyle const kRoundSmall = RoundSmall_; // Data type of elements in the destination fragment using MmaElement = typename DestinationFragment::Element; // Numeric convertor MmaElementBig, MmaElementSmall <= RealElement using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>; // Operand layout parameters using SourceFragmentLayout = layout::RowMajor; static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn; // BigSmall Fragment holding two TF32 elements (big, small) for every float using BigSmallFragment = Array<MmaElement, 2>; /// Index in fargments for the big and small part static int const kBigIndex = 0; static int const kSmallIndex = 1; /// Ctor CUTLASS_DEVICE UnpackComplexConvertAndPackForMmaFastF32() {} CUTLASS_HOST_DEVICE void operator()(DestinationFragment *dest, SourceFragment const &source) { Converter convert_op; SourceFragmentLayout layout(kLdm); DestinationFragment *dest_big_ = reinterpret_cast<DestinationFragment*>(dest); DestinationFragment *dest_small_ = reinterpret_cast<DestinationFragment*>(&dest[MmaIterations::kColumn * 2]); CUTLASS_PRAGMA_UNROLL for(int i=0; i<MmaIterations::kColumn; i++) { int pos = 0; CUTLASS_PRAGMA_UNROLL for(int c=0; c<MmaOperandShape::kColumn; c++) { CUTLASS_PRAGMA_UNROLL for(int r=0; r<MmaOperandShape::kRow; r++) { // Logical position of element in source fragment int row = r; int col = c + i * MmaOperandShape::kColumn; // Access complex<RealElement> apply rounding on real and imag parts BigSmallFragment a = convert_op(source[layout(MatrixCoord{row,col})].real()); BigSmallFragment b = convert_op(source[layout(MatrixCoord{row,col})].imag()); // Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation dest_big_[i][pos] = a[kBigIndex]; dest_big_[i+MmaIterations::kColumn][pos] = (kTransform == ComplexTransform::kConjugate ? 
-b[kBigIndex] : b[kBigIndex]); // Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation dest_small_[i][pos] = a[kSmallIndex]; dest_small_[i+MmaIterations::kColumn][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kSmallIndex] : b[kSmallIndex]); // next position pos++; } } } } }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) typename Policy_, /// Complex transform on A operand ComplexTransform TransformA = ComplexTransform::kNone, /// Complex transform on B operand ComplexTransform TransformB = ComplexTransform::kNone, /// Used for partial specialization typename Enable = bool > class MmaComplexTensorOpFastF32; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex*complex+complex => complex: // Operands data type: complex<float> // Rounding: float -> tfloat32_t (round half_ulp_truncate nearest) // Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 // Output data type: complex<float> // ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) typename Policy_, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB, /// Used for partial specialization typename Enable > class MmaComplexTensorOpFastF32< Shape_, complex<float>, LayoutA_, complex<float>, LayoutB_, complex<float>, LayoutC_, Policy_, TransformA, TransformB, Enable> { public: /// Shape of warp-level matrix operation (concept: GemmShape) using Shape = Shape_; /// Data type of members of complex multiplicand A using RealElementA = float; /// Data type of multiplicand A using ElementA = complex<RealElementA>; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of members of complex multiplicand B using RealElementB = float; /// Data type of multiplicand B using ElementB = complex<RealElementB>; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of members of complex accumulator matrix C using RealElementC = float; /// Data type of accumulator matrix C using ElementC = complex<RealElementC>; /// Layout of accumulator matrix C using LayoutC = LayoutC_; /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) using Policy = Policy_; /// Underlying matrix multiply operator (concept: arch::Mma) using ArchMmaOperator = typename Policy::Operator; /// Shape of underlying instruction using InstructionShape = typename ArchMmaOperator::Shape; /// Underlying arch tag using ArchTag = typename ArchMmaOperator::ArchTag; /// Indicates 
class of matrix operator using OperatorClass = arch::OpClassTensorOp; /// Indicates math operator using MathOperator = arch::OpMultiplyAddComplexFastF32; /// Complex transform on A operand static ComplexTransform const kTransformA = TransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = TransformB; /// Number of threads participating in warp-level matrix product static int const kThreadCount = 32; /// Tune F32 to TF32 big small conversion for complex<float> operation /// Different combinations of big small conversion can cause different tradeoffs /// between speed and accuracy. Generally, using round_half_ulp_truncate can /// improve the performance but hurt the accuracy. using ComplexFastF32 = FastF32 < FloatRoundStyle::round_toward_zero, // kRoundBigA FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallA FloatRoundStyle::round_toward_zero, // kRoundBigB FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallB TensorFloat32Op::k3xTF32 // Number of TF32 operations >; /// Index in fragments for the big and small part static int const kBigIndex = 0; static int const kSmallIndex = 1; public: /// Iterates over the A operand in memory using IteratorA = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA, MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>, Policy::OpDelta::kRow, 32, 1 >; /// Storage for A tile using FragmentA = typename IteratorA::Fragment; /// Storage for transformed A tile // (4 times the original FragmentA::kElements) // (real_big), (imag_big), (real_small), (imag_small) using TransformedFragmentA = Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2 * 2>; // Fragment bisecting big and small sections // (real_big, imag_big), (real_small, imag_small) using AccessTypeFragmentA = Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2>; /// Iterates over the B operand in memory using IteratorB = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB, MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>, Policy::OpDelta::kColumn, 32, 1 >; /// Storage for B tile using FragmentB = typename IteratorB::Fragment; /// Storage for transformed B tile // (4 times the original FragmentB::kElements) // (real_big), (imag_big), (real_small), (imag_small) using TransformedFragmentB = Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2 * 2>; // Fragment bisecting big and small sections // (real_big, imag_big), (real_small, imag_small) using AccessTypeFragmentB = Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2>; static_assert( !(Shape::kM % ArchMmaOperator::Shape::kM) && !(Shape::kN % ArchMmaOperator::Shape::kN), "Shape of warp-level Mma must be divisible by operator shape."); /// Number of complex product operations performed (one complex product needs four mma instructions) using MmaIterations = MatrixShape< Shape::kM / ArchMmaOperator::Shape::kM, Shape::kN / ArchMmaOperator::Shape::kN >; /// Iterates over the C operand in memory using IteratorC = MmaTensorOpAccumulatorTileIterator< MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC, typename ArchMmaOperator::Shape, typename Policy::OpDelta>; /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this /// storage arrangement is to be considered 'planar complex' in the sense that all real-valued /// parts are stored consecutively followed by all imaginary parts.
This matches the structure /// of Tensor Cores which are always real-valued matrix multiplies. using FragmentC = typename IteratorC::Fragment; // // Alias types for underlying real-valued matrix multiply operator // using InstMmaOperandA = typename ArchMmaOperator::FragmentA; using InstMmaOperandB = typename ArchMmaOperator::FragmentB; using MmaOperandC = typename ArchMmaOperator::FragmentC; static_assert(platform::is_same<cutlass::gemm::GemmShape<16, 8, 8>, typename ArchMmaOperator::Shape>::value, "This implementation only supports mma.m16n8k8 math instructions."); static_assert(InstMmaOperandA::kElements == 4, "This implementation only supports math instructions in which exactly four elements are needed for the A operand." " We can generalize later."); static_assert(InstMmaOperandB::kElements == 2, "This implementation only supports math instructions in which exactly two elements are needed for the B operand." " We can generalize later."); private: // // Data members // /// Underlying real-valued matrix multiply operator (concept: arch::Mma) ArchMmaOperator mma; public: // // Methods // /// Ctor CUTLASS_DEVICE MmaComplexTensorOpFastF32() {} /// Performs a warp-level matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &D, TransformedFragmentA const &A, TransformedFragmentB const &B, FragmentC const &C ) const { AccessTypeFragmentA const *complex_A = reinterpret_cast<AccessTypeFragmentA const*>(&A); AccessTypeFragmentB const *complex_B = reinterpret_cast<AccessTypeFragmentB const*>(&B); // // Accumulate in place // D = C; complex_mma_operator(D, complex_A[kSmallIndex], complex_B[kBigIndex], D); complex_mma_operator(D, complex_A[kBigIndex], complex_B[kSmallIndex], D); complex_mma_operator(D, complex_A[kBigIndex], complex_B[kBigIndex], D); if (ComplexFastF32::kPrecision == TensorFloat32Op::k4xTF32) complex_mma_operator(D, complex_A[kSmallIndex], complex_B[kSmallIndex], D); } /// Performs a warp-level matrix multiply-accumulate operation CUTLASS_DEVICE void complex_mma_operator( FragmentC &D, AccessTypeFragmentA const &complex_A, AccessTypeFragmentB const &complex_B, FragmentC const &C ) const { // Instruction Operands A & B holding real part followed by imaginary part for mma operations InstMmaOperandA const *operand_A = reinterpret_cast<InstMmaOperandA const *>(&complex_A); InstMmaOperandB const *operand_B = reinterpret_cast<InstMmaOperandB const *>(&complex_B); CUTLASS_PRAGMA_UNROLL for (int m = 0; m < MmaIterations::kRow; ++m) { // mma(accum.real(), a.real(), b.real(), accum.real()); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < MmaIterations::kColumn; ++n) { // Real-valued accumulator part MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) + (m + n * MmaIterations::kRow); mma(*accum, operand_A[m], operand_B[n], *accum); } // mma(accum.imag(), a.real(), b.imag(), accum.imag()); CUTLASS_PRAGMA_UNROLL for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { // Complex-valued accumulator part MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) + (m + n * MmaIterations::kRow) + MmaIterations::kCount; mma(*accum, operand_A[m], operand_B[n+MmaIterations::kColumn], *accum); } // mma(accum.real(), a.imag(), -b.imag(), accum.real()) CUTLASS_PRAGMA_UNROLL for (int n = 0; n < MmaIterations::kColumn; ++n) { // negate OperandB to accumulate -(a.imag()*b.imag()) // negating OperandB emits fewer instructions than negating OperandA as OperandB has fewer elements negate<InstMmaOperandB> negate_op; // Real-valued accumulator part MmaOperandC *accum =
reinterpret_cast<MmaOperandC *>(&D) + (m + n * MmaIterations::kRow); mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum); } // mma(accum.imag(), a.imag(), b.real(), accum.imag()) CUTLASS_PRAGMA_UNROLL for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { // Complex-valued accumulator part MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) + (m + n * MmaIterations::kRow) + MmaIterations::kCount; mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum); } } } /// Transform the mma operands to the required types CUTLASS_DEVICE void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, FragmentA const &A, FragmentB const &B) const { detail::UnpackComplexConvertAndPackForMmaFastF32 < RealElementA, InstMmaOperandA, FragmentA, MmaIterations, MatrixShape<2, 2>, kTransformA, Operand::kA, ComplexFastF32::kRoundBigA, ComplexFastF32::kRoundSmallA> convert_A; detail::UnpackComplexConvertAndPackForMmaFastF32 < RealElementB, InstMmaOperandB, FragmentB, MmaIterations, MatrixShape<2, 1>, kTransformB, Operand::kB, ComplexFastF32::kRoundBigB, ComplexFastF32::kRoundSmallB> convert_B; // Convert Fragment[A|B] holding complex<RealElement[A|B]> to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element convert_A(reinterpret_cast<InstMmaOperandA *>(&dst_A), A); convert_B(reinterpret_cast<InstMmaOperandB *>(&dst_B), B); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
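/////////////////////////////////////////////////////////////////////////////////////////////////
// A standalone sketch (plain C++, not a CUTLASS API) of the big/small decomposition behind the
// fast-F32 path above: each float is split into a "big" part holding its leading tfloat32
// mantissa bits plus a "small" float remainder, and the product is approximated by the three
// terms the k3xTF32 path issues (small*big, big*small, big*big); k4xTF32 would add small*small.
// The truncation below stands in for the rounding modes selected in ComplexFastF32 and keeps
// the small remainders at full float precision, so it is an illustration, not the exact
// NumericConverterFastF32 behavior.
#include <cstdint>
#include <cstdio>
#include <cstring>

// Round a float toward zero to tfloat32 precision by clearing the 13 low mantissa bits.
static float to_tf32(float x) {
  std::uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= 0xffffe000u;
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}

int main() {
  float a = 1.2345678f, b = 7.6543210f;

  float a_big = to_tf32(a), a_small = a - a_big;   // a == a_big + a_small exactly
  float b_big = to_tf32(b), b_small = b - b_big;

  float fp32   = a * b;                                               // full-precision reference
  float tf32x1 = a_big * b_big;                                       // single TF32 product
  float tf32x3 = a_small * b_big + a_big * b_small + a_big * b_big;   // 3xTF32-style sum

  std::printf("fp32=%.9g  1xTF32=%.9g  3xTF32=%.9g\n", fp32, tf32x1, tf32x3);
  return 0;
}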
cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h", "repo_id": "cutlass", "token_count": 8071 }
42
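/////////////////////////////////////////////////////////////////////////////////////////////////
// A scalar reference (plain C++, not a CUTLASS API) for the four real-valued multiply-adds that
// complex_mma_operator in mma_complex_tensor_op_fast_f32.h above issues per complex product, in
// the same order: real += a.r*b.r, imag += a.r*b.i, real += a.i*(-b.i), imag += a.i*b.r. The
// operand values are arbitrary; the warp-level code performs these as m16n8k8 tensor core
// instructions over whole fragments rather than scalars.
#include <complex>
#include <cstdio>

int main() {
  std::complex<float> a(1.5f, -2.0f), b(0.5f, 3.0f), accum(0.25f, 0.75f);

  float d_real = accum.real(), d_imag = accum.imag();
  d_real += a.real() * b.real();    // mma(accum.real(), a.real(),  b.real(), accum.real())
  d_imag += a.real() * b.imag();    // mma(accum.imag(), a.real(),  b.imag(), accum.imag())
  d_real += a.imag() * -b.imag();   // mma(accum.real(), a.imag(), -b.imag(), accum.real())
  d_imag += a.imag() * b.real();    // mma(accum.imag(), a.imag(),  b.real(), accum.imag())

  std::complex<float> expected = accum + a * b;
  std::printf("decomposed=(%g, %g)  expected=(%g, %g)\n",
              d_real, d_imag, expected.real(), expected.imag());
  return 0;
}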
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { //////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Operand identity Operand Operand, /// Data type of A elements typename Element_, /// Layout of operand typename Layout_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) int OpDelta_, /// Number of threads participating in one matrix operation int Threads, /// Number of partitions along K dimension int PartitionsK_ = 1> class MmaTensorOpMultiplicandTileIterator; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, 64>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, 64>; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kContiguous % InstructionShape::kContiguous), "Shape of warp-level Mma must be divisible by operator shape."); // Determine number of elements along outer dimension per individual LDSM op static int const kLdsmOpOuter = Layout::kElementsPerAccess; static int const kLdsmOpInner = 8; static_assert(!(Shape::kContiguous % kLdsmOpOuter), "Shape of warp-level mma must be divisible by LDSM's fundamental tile size."); static_assert(!(Shape::kStrided % kLdsmOpInner), "Shape of warp-level mma must be divisible by LDSM's fundamental tile size."); /// Shape of one individual LDSM instruction static int const LdsmShapeStrided = InstructionShape::kStrided / kLdsmOpInner; static int const LdsmShapeContiguous = 4 / LdsmShapeStrided; using LdsmShape = layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>; /// Number and arrangement of LDSM instructions using LdsmIterations = layout::PitchLinearShape< Shape::kContiguous / Layout::kElementsPerAccess / LdsmShapeContiguous, 1>; /// Number of groups for each tile static int const kGroupsPerTile = Shape::kStrided / InstructionShape::kStrided; }; private: /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Number of internal pointers needed to reference shared memory static int const kPointerCount = Layout::TileShape::kContiguous / Policy::LdsmShape::kContiguous; /// Pointer type used for accesses using AccessType = Array<Element, Layout::kElementsPerAccess>; /// Internal counter used to jump to next K partition int k_group_idx_; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_[kPointerCount]; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0), k_group_idx_(0) { int quad_pair = (lane_id >> 3); int quad_quad = (lane_id >> 4); int lane_in_quad = (lane_id & 3); int lane_in_quad_pair = (lane_id & 7); int lane_in_quad_quad = (lane_id & 15); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount; ++i) { int partition_contiguous_idx = -1; int access_contiguous_idx = -1; int access_strided_idx = -1; if (Policy::LdsmShape::kContiguous == 4) { // Matrix multiply 1688 A/B // Q0 Q1 Q2 Q3 (Q stands for 1 8x128bit block). // Four blocks are next to each other in the contiguous dimension. partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ i); access_contiguous_idx = (quad_pair ^ lane_in_quad); access_strided_idx = lane_in_quad_pair; } else if (Policy::LdsmShape::kContiguous == 2 && kOperand == Operand::kA) { // Matrix multiply 16816 A // Q0 Q1 // Q2 Q3 partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 1)); access_contiguous_idx = (((quad_pair & 1) + ((i & 1) << 1)) ^ lane_in_quad); access_strided_idx = lane_in_quad_pair + (lane_id >> 4 << 3); } else if (Policy::LdsmShape::kContiguous == 2 && kOperand == Operand::kB) { // Matrix multiply 16816 B // Q0 Q2 // Q1 Q3 partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 1)); access_contiguous_idx = ((quad_quad + ((i & 1) << 1)) ^ lane_in_quad); access_strided_idx = lane_in_quad_quad; } else if (Policy::LdsmShape::kContiguous == 1) { // Matrix multiply 16832.SP B // Q0 // Q1 // Q2 // Q3 partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 2)); access_contiguous_idx = ((i & 3) ^ lane_in_quad); access_strided_idx = lane_id; } int access_contiguous = partition_contiguous_idx * Layout::PartitionShape::kContiguous + access_contiguous_idx; int access_strided = access_strided_idx; pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) + access_contiguous + access_strided * stride_; } } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof(Element); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { int contiguous_offset = tile_offset.contiguous(); if (Shape::kContiguous == Layout::PartitionShape::kContiguous * 
Layout::kElementsPerAccess) { if (tile_offset.contiguous() % 2) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount / 2; ++i) { AccessType const *tmp_pointer = pointer_[i]; pointer_[i] = pointer_[i + kPointerCount / 2]; pointer_[i + kPointerCount / 2] = tmp_pointer; } } contiguous_offset = (tile_offset.contiguous() >> 1) << 1; } int offset = (tile_offset.strided() * InstructionShape::kStrided) * stride_ * Layout::kElementsPerAccess + contiguous_offset * Shape::kContiguous; add_pointer_offset(offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { add_tile_offset({0, 1}); if (kPartitionsK > 1) { ++k_group_idx_; // Jump to next stage if (k_group_idx_ == Policy::kGroupsPerTile) { k_group_idx_ = 0; add_tile_offset( {0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)}); } } return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * Layout::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr = reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsmIterations::kContiguous; AccessType const *source_ptr = pointer_[c % kPointerCount] + Layout::TileShape::kContiguous * (c / kPointerCount) + Policy::kLdsmOpInner * Policy::LdsmShape::kStrided * s * stride_; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; cutlass::arch::ldsm<layout::ColumnMajor, Policy::LdsmShape::kCount>( fetch_ptr[access_idx], source_byte_ptr ); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * InstructionShape::kStrided * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no op } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread MMA.TF32 NT TensorOps. It /// uses LDS.32 to load from shared memory and therefore must be initialized /// with a TensorRef to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::TensorOpMultiplicandCongruous<32, 32>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand == Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for " "A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicandCongruous<32, 32>; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename 
TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kContiguous % InstructionShape::kContiguous), "Shape of warp-level Mma must be divisible by operator shape."); // Determine number of elements along outer dimension per individual 32bit // shared memory load op. Every one warp of 32bit shared memory load loads // 8x4 elements static int const kLdsOpInner = Layout::TileShape::kStrided; static int const kLdsOpOuter = kThreads / kLdsOpInner; static_assert(!(Shape::kContiguous % kLdsOpOuter), "Shape of warp-level mma must be divisible by 32bit " "fundamental tile size."); static_assert(!(Shape::kStrided % kLdsOpInner), "Shape of warp-level mma must be divisible by 32bit " "fundamental tile size."); /// Number of 32 bit shared memory load instructions needed by one MMA instruction /// 1688 A 2x2 /// 1688 B 1x2 /// 16816 B 1x4 static int const LdsShapeContiguous = InstructionShape::kContiguous / kLdsOpOuter; static int const LdsShapeStrided = InstructionShape::kStrided / kLdsOpInner; using LdsShape = layout::PitchLinearShape<LdsShapeContiguous, LdsShapeStrided>; /// Number and arrangement of LDS instructions using LdsIterations = layout::PitchLinearShape< Shape::kContiguous / LdsShapeContiguous / kLdsOpOuter, 1>; /// Number of groups for each tile static int const kGroupsPerTile = Shape::kStrided / InstructionShape::kStrided; }; private: /// Not working on this feature at the moment. static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Number of internal pointers needed to reference shared memory static int const kPointerCount = Layout::TileShape::kContiguous * Layout::kElementsPerAccess / Policy::kLdsOpOuter; /// Vectorized access is not used static int const kElementsPerAccess = 1; /// Pointer type used for accesses using AccessType = Element; /// Internal counter used to jump to next K partition int k_group_idx_; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_[kPointerCount]; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() : stride_(0), byte_offset_(0) {} /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) : stride_(ref.stride(0)), byte_offset_(0), k_group_idx_(0) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount; ++i) { int access_strided = lane_id % Policy::kLdsOpInner; int access_contiguous = (lane_id / Policy::kLdsOpInner) + (access_strided ^ i) * Policy::kLdsOpOuter; pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) + access_contiguous + access_strided * stride_; } } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof(Element); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset( TensorCoord const &tile_offset) { int contiguous_offset = tile_offset.contiguous(); if (Shape::kContiguous == Layout::TileShape::kContiguous 
* Layout::kElementsPerAccess / 2) { if (tile_offset.contiguous() % 2) { // Matrix multiply 1688 pointer_[0] <=> pointer_[4] pointer_[1] <=> pointer_[5] // pointer_[2] <=> pointer_[6] pointer_[3] <=> pointer_[7] CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount / 2; ++i) { AccessType const *tmp_pointer = pointer_[i]; pointer_[i] = pointer_[i + kPointerCount / 2]; pointer_[i + kPointerCount / 2] = tmp_pointer; } } contiguous_offset = (tile_offset.contiguous() >> 1) << 1; } int offset = (tile_offset.strided() * InstructionShape::kStrided) * stride_ + contiguous_offset * Shape::kContiguous; add_pointer_offset(offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator++() { add_tile_offset({0, 1}); if (kPartitionsK > 1) { ++k_group_idx_; // Jump to next stage if (k_group_idx_ == Policy::kGroupsPerTile) { k_group_idx_ = 0; add_tile_offset( {0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)}); } } return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &operator--() { byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { Element *fetch_ptr = reinterpret_cast<Element *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int ss = 0; ss < Policy::LdsShape::kStrided; ++ss) { CUTLASS_PRAGMA_UNROLL for (int cc = 0; cc < Policy::LdsShape::kContiguous; ++cc) { int access_idx = cc + (ss + (c + s * Policy::LdsIterations::kContiguous) * Policy::LdsShape::kStrided) * Policy::LdsShape::kContiguous; int access_idx_contiguous = cc + c * Policy::LdsShape::kContiguous; int access_idx_strided = (ss + s * Policy::LdsShape::kStrided) * Policy::kLdsOpInner; AccessType const *source_ptr = pointer_[access_idx_contiguous % kPointerCount] + Layout::TileShape::kContiguous * Layout::kElementsPerAccess * (access_idx_contiguous / kPointerCount) + access_idx_strided * stride_; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; fetch_ptr[access_idx] = *reinterpret_cast<Element const *>(source_byte_ptr); } } } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * InstructionShape::kStrided * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no op } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps with 64B warp tile /// the contiguous dimension. This assumes Threadblock contiguous dimension has /// the same size as the warp tile. It uses LDSM to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. /// /// This specialization can be merged into the general one. Most code is the same. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::TensorOpMultiplicandCongruous<16, 32>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Element number when the layout crosses static int const kCrosswise = 32; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, kCrosswise>; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kContiguous % InstructionShape::kContiguous), "Shape of warp-level Mma must be divisible by operator shape."); // Determine number of elements along outer dimension per individual LDSM op static int const kLdsmOpOuter = Layout::kElementsPerAccess; static int const kLdsmOpInner = 8; static_assert(!(Shape::kContiguous % kLdsmOpOuter), "Shape of warp-level mma must be divisible by LDSM's fundamental tile size."); static_assert(!(Shape::kStrided % kLdsmOpInner), "Shape of warp-level mma must be divisible by LDSM's fundamental tile size."); /// Shape of one individual LDSM instruction static int const LdsmShapeStrided = InstructionShape::kStrided / kLdsmOpInner; static int const LdsmShapeContiguous = 4 / LdsmShapeStrided; using LdsmShape = layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>; /// Number and arrangement of LDSM 
instructions using LdsmIterations = layout::PitchLinearShape< Shape::kContiguous / Layout::kElementsPerAccess / LdsmShapeContiguous, 1>; /// Number of groups for each tile static int const kGroupsPerTile = Shape::kStrided / InstructionShape::kStrided; }; private: /// Not working on this feature at the moment. static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Number of internal pointers needed to reference shared memory static int const kPointerCount = Layout::TileShape::kContiguous / Policy::LdsmShape::kContiguous / Layout::kFactor; /// Pointer type used for accesses using AccessType = Array<Element, Layout::kElementsPerAccess>; /// Internal counter used to jump to next K partition int k_group_idx_; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_[kPointerCount]; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess), byte_offset_(0), k_group_idx_(0) { int quad_pair = (lane_id >> 3); int quad_quad = (lane_id >> 4); //int lane_in_quad = (lane_id & 3); int lane_in_quad_pair = (lane_id & 7); int lane_in_quad_quad = (lane_id & 15); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount; ++i) { int partition_contiguous_idx = -1; int access_contiguous_idx = -1; int access_strided_idx = -1; if (Policy::LdsmShape::kContiguous == 4) { // Matrix multiply 1688 A/B // Q0 Q1 Q2 Q3 (Q stands for 1 8x128bit block). // Four blocks are next to each other in the contiguous dimension. 
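        // Added explanatory note (not from the original file): the assignments
        // below derive, for each of the kPointerCount base pointers, where this
        // lane's first 128-bit access starts. The XOR against the lane's
        // position within its quad-pair reproduces the swizzled (permuted)
        // column index used when the tile was stored to shared memory, so the
        // subsequent LDSM reads avoid shared-memory bank conflicts.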
partition_contiguous_idx = (lane_id % Layout::kFactor); access_contiguous_idx = quad_pair ^ (lane_in_quad_pair / Layout::kFactor); access_strided_idx = lane_in_quad_pair / Layout::kFactor; } else if (Policy::LdsmShape::kContiguous == 2 && kOperand == Operand::kA) { // Matrix multiply 16816 A // Q0 Q1 // Q2 Q3 partition_contiguous_idx = (lane_id % Layout::kFactor); access_contiguous_idx = (((quad_pair & 1) + i * 2) ^ (lane_in_quad_pair / Layout::kFactor)); access_strided_idx = (lane_in_quad_pair + (lane_id >> 4 << 3)) / 2; } else if (Policy::LdsmShape::kContiguous == 2 && kOperand == Operand::kB) { // Matrix multiply 16816 B // Q0 Q2 // Q1 Q3 partition_contiguous_idx = (lane_id % Layout::kFactor); access_contiguous_idx = (quad_quad + i * 2) ^ (lane_in_quad_pair / Layout::kFactor); access_strided_idx = (lane_in_quad_quad / Layout::kFactor); } int access_contiguous = partition_contiguous_idx * Layout::PartitionShape::kContiguous + access_contiguous_idx; int access_strided = access_strided_idx; pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) + access_contiguous + access_strided * stride_; } } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof(Element); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { int contiguous_offset = tile_offset.contiguous(); if (Shape::kContiguous == Layout::PartitionShape::kContiguous * Layout::kElementsPerAccess) { if (tile_offset.contiguous() % 2) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount / 2; ++i) { AccessType const *tmp_pointer = pointer_[i]; pointer_[i] = pointer_[i + kPointerCount / 2]; pointer_[i + kPointerCount / 2] = tmp_pointer; } } contiguous_offset = (tile_offset.contiguous() >> 1) << 1; } int offset = (tile_offset.strided() * InstructionShape::kStrided) * stride_ * Layout::kElementsPerAccess / Layout::kFactor + contiguous_offset * Shape::kContiguous; add_pointer_offset(offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { add_tile_offset({0, 1}); if (kPartitionsK > 1) { ++k_group_idx_; // Jump to next stage if (k_group_idx_ == Policy::kGroupsPerTile) { k_group_idx_ = 0; add_tile_offset( {0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)}); } } return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * Layout::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
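  /// Added note (illustrative, not from the original file): the LDSM-based
  /// load path below (load() delegating to load_with_byte_offset()) walks
  /// Policy::LdsmIterations tiles; each iteration issues one
  /// cutlass::arch::ldsm<> that loads Policy::LdsmShape::kCount 8x128-bit
  /// matrices cooperatively across the warp, with each lane receiving kCount
  /// 32-bit registers of its fragment.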
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr = reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsmIterations::kContiguous; AccessType const *source_ptr = pointer_[c % kPointerCount] + Layout::TileShape::kContiguous * (c / kPointerCount) + Policy::kLdsmOpInner * Policy::LdsmShape::kStrided * s * stride_ / Layout::kFactor; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; cutlass::arch::ldsm<layout::ColumnMajor, Policy::LdsmShape::kCount>( fetch_ptr[access_idx], source_byte_ptr ); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * InstructionShape::kStrided * stride_ / Layout::kFactor; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no op } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps with 32B warp tile /// the contiguous dimension. This assumes Threadblock contiguous dimension has /// the same size as the warp tile. It uses LDSM to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. 
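/// Added clarification (not from the original file): "32B warp tile in the
/// contiguous dimension" refers to kCrosswise = 16 elements of a 16-bit type,
/// i.e. 32 bytes, which this specialization assumes is the extent of both the
/// warp tile and the threadblock tile along the contiguous dimension.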
/// /// This specialization can be merged into the general one. Most code is the same. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::TensorOpMultiplicandCongruous<16, 16>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Element number when the layout crosses static int const kCrosswise = 16; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, kCrosswise>; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kContiguous % InstructionShape::kContiguous), "Shape of warp-level Mma must be divisible by operator shape."); // Determine number of elements along outer dimension per individual LDSM op static int const kLdsmOpOuter = Layout::kElementsPerAccess; static int const kLdsmOpInner = 8; static_assert(!(Shape::kContiguous % kLdsmOpOuter), "Shape of warp-level mma must be divisible by LDSM's fundamental tile size."); static_assert(!(Shape::kStrided % kLdsmOpInner), "Shape of warp-level mma must be divisible by LDSM's fundamental tile size."); /// Shape of one individual LDSM instruction static int const LdsmShapeStrided = InstructionShape::kStrided / kLdsmOpInner; static int const LdsmShapeContiguous = 4 / LdsmShapeStrided; using LdsmShape = layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>; /// Number and arrangement of LDSM instructions using LdsmIterations = layout::PitchLinearShape< Shape::kContiguous / Layout::kElementsPerAccess / LdsmShapeContiguous, 1>; /// Number of groups for each tile static int const kGroupsPerTile = Shape::kStrided / InstructionShape::kStrided; }; private: /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Number of internal pointers needed to reference shared memory static int const kPointerCount = Layout::TileShape::kContiguous / Policy::LdsmShape::kContiguous / Layout::kFactor; /// Pointer type used for accesses using AccessType = Array<Element, Layout::kElementsPerAccess>; /// Internal counter used to jump to next K partition int k_group_idx_; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_[kPointerCount]; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess), byte_offset_(0), k_group_idx_(0) { //int quad_pair = (lane_id >> 3); int quad_quad = (lane_id >> 4); int lane_in_pair = (lane_id & 1); int lane_in_quad = (lane_id & 3); int lane_in_quad_pair = (lane_id & 7); int lane_in_quad_quad = (lane_id & 15); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount; ++i) { int partition_contiguous_idx = -1; int access_contiguous_idx = -1; int access_strided_idx = -1; if (Policy::LdsmShape::kContiguous == 2 && kOperand == Operand::kA) { // Matrix multiply 16816 A // Q0 Q1 // Q2 Q3 partition_contiguous_idx = lane_in_quad / 2; access_strided_idx = lane_in_quad_pair / Layout::kFactor + quad_quad * 2; access_contiguous_idx = ((lane_in_pair * 2 + ((lane_id & 8) >> 3)) ^ access_strided_idx); } else if (Policy::LdsmShape::kContiguous == 2 && kOperand == Operand::kB) { // Matrix multiply 16816 B // Q0 Q2 // Q1 Q3 partition_contiguous_idx = lane_in_quad / 2; access_strided_idx = lane_in_quad_quad / Layout::kFactor; access_contiguous_idx = ((lane_in_pair * 2 + quad_quad) ^ access_strided_idx); } int access_contiguous = partition_contiguous_idx * Layout::PartitionShape::kContiguous + access_contiguous_idx; int access_strided = access_strided_idx; pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) + access_contiguous + access_strided * stride_; } } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof(Element); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { int contiguous_offset = tile_offset.contiguous(); if (Shape::kContiguous == Layout::PartitionShape::kContiguous * Layout::kElementsPerAccess) { if (tile_offset.contiguous() % 2) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount / 2; ++i) { AccessType const *tmp_pointer = pointer_[i]; pointer_[i] = pointer_[i + kPointerCount / 2]; pointer_[i + kPointerCount / 2] = tmp_pointer; } } contiguous_offset = (tile_offset.contiguous() >> 1) << 1; } int offset = (tile_offset.strided() * InstructionShape::kStrided) * stride_ * Layout::kElementsPerAccess / Layout::kFactor + contiguous_offset * Shape::kContiguous; add_pointer_offset(offset); return 
*this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { add_tile_offset({0, 1}); if (kPartitionsK > 1) { ++k_group_idx_; // Jump to next stage if (k_group_idx_ == Policy::kGroupsPerTile) { k_group_idx_ = 0; add_tile_offset( {0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)}); } } return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * Layout::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr = reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsmIterations::kContiguous; AccessType const *source_ptr = pointer_[c % kPointerCount] + Layout::TileShape::kContiguous * (c / kPointerCount) + Policy::kLdsmOpInner * Policy::LdsmShape::kStrided * s * stride_ / Layout::kFactor; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; cutlass::arch::ldsm<layout::ColumnMajor, Policy::LdsmShape::kCount>( fetch_ptr[access_idx], source_byte_ptr ); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
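  /// Added worked example (illustrative, not from the original file): with
  /// tile_offset = {1, 0}, the computation below adds
  ///   sizeof(AccessType) * Shape::kContiguous / Layout::kElementsPerAccess
  ///     = Shape::kContiguous * sizeof(Element)
  /// bytes, i.e. it skips exactly one whole tile along the contiguous
  /// dimension; the strided term contributes nothing for this offset.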
CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * InstructionShape::kStrided * stride_ / Layout::kFactor; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no op } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Element number when the layout crosses (in units of elements) int Crosswise, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA, "MmaTensorOpMultiplicandIterator for ColumnMajor Congruous may " "only be instantiated for A operand to warp-level Mma."); /// Element type using Element = Element_; /// MBlock or NBlock size static int const kCrosswise = Crosswise; /// Layout of source tile using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, kCrosswise>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, kCrosswise>, 
layout::PitchLinearShape<InstructionShape::kRow, InstructionShape::kColumn>, kOpDelta, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): iterator_({ref.data(), ref.stride()}, lane_id) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Element number when the layout crosses (in units of elements) int Crosswise, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kB, "MmaTensorOpMultiplicandIterator for RowMajor Congruous may " "only be instantiated for B operand to warp-level Mma."); /// Element type using Element = Element_; /// Element number when the layout crosses static int const kCrosswise = Crosswise; /// Layout of source tile using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, kCrosswise>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, kCrosswise>, layout::PitchLinearShape<InstructionShape::kColumn, InstructionShape::kRow>, kOpDelta, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; 
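  // Added note (not from the original file): this row-major adapter performs
  // no memory access of its own; it only reorders matrix coordinates into the
  // pitch-linear convention expected by Base and forwards every call to the
  // wrapped iterator declared below.
  //
  // Illustrative usage sketch; `smem_ref` and `lane_id` are assumed names:
  //
  //   MmaTensorOpMultiplicandTileIterator iter(smem_ref, lane_id);
  //   Fragment frag;
  //   iter.load(frag);   // delegates to the underlying pitch-linear iterator
  //   ++iter;            // advances the wrapped iterator one k-group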
private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): iterator_({ref.data(), ref.stride()}, lane_id) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Element number when the layout crosses (in units of elements) int Crosswise, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand == Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for " "A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Element number when the layout crosses static int const kCrosswise = Crosswise; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, kCrosswise>; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kContiguous % InstructionShape::kContiguous), "Shape of warp-level Mma must be divisible by operator shape."); // Determine number of elements along outer dimension per individual LDSM op static int 
const kLdsmOpOuter = Layout::kElementsPerAccess; static int const kLdsmOpInner = 8; static_assert(!(Shape::kContiguous % kLdsmOpOuter), "Shape of warp-level mma must be divisible by LDSM's " "fundamental tile size."); static_assert(!(Shape::kStrided % kLdsmOpInner), "Shape of warp-level mma must be divisible by LDSM's " "fundamental tile size."); /// Shape of one individual LDSM instruction static int const LdsmShapeContiguous = InstructionShape::kContiguous / kLdsmOpOuter; static int const LdsmShapeStrided = ((4 / LdsmShapeContiguous * kLdsmOpInner) > Shape::kStrided) ? (Shape::kStrided / kLdsmOpInner) : (4 / LdsmShapeContiguous); using LdsmShape = layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>; /// Number and arrangement of LDSM instructions using LdsmIterations = layout::PitchLinearShape<1, Shape::kStrided / kLdsmOpInner / LdsmShape::kStrided>; /// static int const kGroupsPerTile = Layout::TileShape::kContiguous / Layout::kFactor / LdsmShape::kContiguous; }; private: /// Not working on this feature at the moment. static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Pointer type used for accesses using AccessType = Array<Element, Layout::kElementsPerAccess>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>; private: /// Total number of sections. The memory is divided into stages. One stage /// can store one tile. Stage is divided into sections. Interleaved layout /// can have multiple sections in a stage. The rest layout only has one section /// in a stage. int sections_; /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; /// Internal counter used to determine when to increment byte offset and when /// to XOR it int k_group_idx_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() : pointer_(nullptr), sections_(0), stride_(0), byte_offset_(0), k_group_idx_(0) {} /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) : pointer_(reinterpret_cast<AccessType const *>(ref.data())), sections_(ref.stride(0) / kCrosswise), // stride_ = kCrosswise x sections_ x kFactor stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess), byte_offset_(0), k_group_idx_(0) { // Warp level iterator at most use double buffer to hide latency. If there // are more than 2 sections, every stage should have more than 1 section. 
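    // Added clarification (not from the original file): sections_ counts how
    // many kCrosswise-wide sections share one shared-memory stage
    // (ref.stride(0) / kCrosswise), and stride_ is kept in units of AccessType
    // vectors (nominally 128-bit accesses), which is why ref.stride(0) is
    // scaled by Layout::kFactor / Layout::kElementsPerAccess above.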
// Turing silicon requires all 32 threads in a warp provide valid addresses // even for LDSM.1 and LDSM.2 #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 750)) lane_id = lane_id % (Policy::LdsmShape::kCount * Policy::kLdsmOpInner); #endif int quad_quad = (lane_id >> 4); int quad_pair = (lane_id >> 3); int lane_in_pair = (lane_id & 1); int lane_in_quad = (lane_id & 3); int lane_in_quad_pair = (lane_id & 7); int lane_in_quad_quad = (lane_id & 15); int partition_contiguous_idx = -1; int access_contiguous_idx = -1; int access_strided_idx = -1; if (Layout::kFactor == 4) { // Super Integer matrix multiply Interleaved-32 int factor_in_partition = (Layout::PartitionShape::kContiguous * Layout::kFactor / Layout::TileShape::kContiguous); if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) { // Integer matrix multiply 8816 A/B partition_contiguous_idx = lane_in_quad / factor_in_partition; access_contiguous_idx = ((lane_in_pair * factor_in_partition) ^ (lane_in_quad_quad / Layout::kFactor)); access_strided_idx = lane_id / Layout::kFactor; } else if (Policy::LdsmShape::kStrided == (Policy::LdsmShape::kCount / 2) && kOperand == Operand::kA) { // Integer matrix multiply 16832 A partition_contiguous_idx = lane_in_quad / factor_in_partition; access_strided_idx = lane_in_quad_quad / Layout::kFactor; access_contiguous_idx = ((lane_in_pair * factor_in_partition + quad_quad) ^ access_strided_idx); } else if (Policy::LdsmShape::kStrided == (Policy::LdsmShape::kCount / 2) && kOperand == Operand::kB) { // Integer matrix multiply 16832 B partition_contiguous_idx = lane_in_quad / factor_in_partition; access_strided_idx = lane_in_quad_pair / Layout::kFactor + quad_quad * 2; access_contiguous_idx = ((lane_in_pair * factor_in_partition + ((lane_id & 8) >> 3)) ^ access_strided_idx); } } else if (Layout::kFactor == 2) { // Super Matrix multiply kBlock = 32 if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) { // Matrix multiply 1688 A/B // (Q stands for 1 8x128bit block). // Q0 // Q1 // Q2 // Q3 // Four blocks are next to each other in the strided dimension. 
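        // Added explanatory note (not from the original file): as in the other
        // branches of this constructor, the three indices computed below pick
        // the partition, the starting 128-bit column (XOR-swizzled in most
        // branches), and the starting row of this lane's first access; only
        // the mapping of lanes to Q blocks differs between the instruction
        // shapes handled here.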
partition_contiguous_idx = (lane_id % Layout::kFactor); access_contiguous_idx = (lane_in_quad_pair / Layout::kFactor); access_strided_idx = lane_id / Layout::kFactor; } else if (Policy::LdsmShape::kStrided == (Policy::LdsmShape::kCount / 2) && kOperand == Operand::kA) { // Matrix multiply 16816|1688.TF32 A // Q0 Q2 // Q1 Q3 partition_contiguous_idx = (lane_id % Layout::kFactor); access_contiguous_idx = (quad_quad ^ (lane_in_quad_pair / Layout::kFactor)); access_strided_idx = (lane_in_quad_quad / Layout::kFactor); } else if (Policy::LdsmShape::kStrided == (Policy::LdsmShape::kCount / 2) && kOperand == Operand::kB) { // Matrix multiply 16816|1688.TF32 B // Q0 Q1 // Q2 Q3 partition_contiguous_idx = (lane_id % Layout::kFactor); access_contiguous_idx = ((quad_pair & 1) ^ (lane_in_quad_pair / Layout::kFactor)); access_strided_idx = (lane_in_quad_pair + (lane_id >> 4 << 3)) / Layout::kFactor; } else if (Policy::LdsmShape::kContiguous == Policy::LdsmShape::kCount) { // Matrix multiply 16832.SP B // Q0 Q1 Q2 Q3 partition_contiguous_idx = (lane_id % Layout::kFactor); access_contiguous_idx = (quad_pair ^ (lane_in_quad_pair / Layout::kFactor)); access_strided_idx = lane_in_quad_pair / Layout::kFactor; } } else if (Layout::kFactor == 1) { // Super Matrix multiply kBlock = 64 if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) { // Q0 // Q1 // Q2 // Q3 partition_contiguous_idx = (lane_in_quad_pair >> 2); access_contiguous_idx = lane_in_quad; access_strided_idx = lane_id; } else if (Policy::LdsmShape::kStrided == (Policy::LdsmShape::kCount / 2) && kOperand == Operand::kA) { // Matrix multiply 16816|1688.TF32 A // Q0 Q2 // Q1 Q3 partition_contiguous_idx = (lane_in_quad_pair >> 2); access_contiguous_idx = (quad_quad ^ lane_in_quad); access_strided_idx = lane_in_quad_quad; } else if (Policy::LdsmShape::kStrided == (Policy::LdsmShape::kCount / 2) && kOperand == Operand::kB) { // Matrix multiply 16816|1688.TF32 B // Q0 Q1 // Q2 Q3 partition_contiguous_idx = (lane_in_quad_pair >> 2); access_contiguous_idx = ((quad_pair & 1) ^ lane_in_quad); access_strided_idx = lane_in_quad_pair + (lane_id >> 4 << 3); } else if (Policy::LdsmShape::kContiguous == Policy::LdsmShape::kCount) { // Matrix multiply 16832.SP B // Q0 Q1 Q2 Q3 partition_contiguous_idx = (lane_in_quad_pair >> 2); access_contiguous_idx = (quad_pair ^ lane_in_quad); access_strided_idx = lane_in_quad_pair; } } int access_contiguous = partition_contiguous_idx * Layout::PartitionShape::kContiguous + access_contiguous_idx; int access_strided = access_strided_idx; byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8; } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof_bits<Element>::value / 8; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset( TensorCoord const &tile_offset) { int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile; int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile; byte_offset_ ^= k_groups_delta * sizeof_bits<Element>::value * Layout::kElementsPerAccess * Policy::LdsmShape::kContiguous / 8; pointer_ += tile_offset.strided() * stride_ * Shape::kStrided / Layout::kFactor + whole_tiles * stride_ / sections_; return *this; } /// Advances an iterator along logical 
dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative( TensorCoord const &tile_offset) { int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile; int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile; if (k_groups_delta < 0) { whole_tiles -= 1; k_groups_delta += Policy::kGroupsPerTile; } if ((Policy::kGroupsPerTile / kPartitionsK) >= 2) { byte_offset_ ^= (k_groups_delta & 1) * Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8; } if ((Policy::kGroupsPerTile / kPartitionsK) >= 4) { byte_offset_ ^= ((k_groups_delta + (k_group_idx_ & 1)) & 2) * Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8; } if ((Policy::kGroupsPerTile / kPartitionsK) == 8) { byte_offset_ ^= ((k_groups_delta + (k_group_idx_ & 3)) & 4) * Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8; } k_group_idx_ += k_groups_delta; whole_tiles += k_group_idx_ / (Policy::kGroupsPerTile / kPartitionsK); k_group_idx_ = k_group_idx_ % (Policy::kGroupsPerTile / kPartitionsK); pointer_ += tile_offset.strided() * stride_ * Shape::kStrided / Layout::kFactor + whole_tiles * stride_ / sections_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator++() { // Integer matrix multiply 16832 Interleaved-32 // NONE // Integer matrix multiply 16816 Interleaved-32 || Integer matrix multiply 16816 kblock=32 // Integer matrix multiply 8816 Interleaved-32 // ^1 ^1 // Matrix multiply 1684.TF32 kblock=16 || Integer matrix multiply 16816 kblock=64 // Matrix multiply 1688 kblock=32 || Integer matrix multiply 8816 kblock=64 // ^1 ^3 ^1 ^3 // Matrix multiply 1688 kblock=64 // ^1 ^3 ^1 ^7 ^1 ^3 ^1 ^7 // Matrix multiply 16816 kblock=32 | 1688.TF32 kblock=16 || Integer matrix multiply 16832 kblock=64 // ^2 ^2 // Matrix multiply 16816 kblock=64 | 1688.TF32 kblock=32 || Integer matrix multiply 16832 kblock=128 // ^2 ^6 ^2 ^6 if ((Policy::kGroupsPerTile / kPartitionsK) > 1) { int mask = ((Policy::kGroupsPerTile / kPartitionsK) == 8) ? 3 : (((Policy::kGroupsPerTile / kPartitionsK) == 4) ? 
1 : 0); if (((k_group_idx_ & mask) % 2) == 0) byte_offset_ ^= 1 * Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8; else if ((k_group_idx_ & mask) == 1) byte_offset_ ^= 3 * Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8; else if ((k_group_idx_ & mask) == 3) byte_offset_ ^= 7 * Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8; } k_group_idx_++; if (k_group_idx_ == (Policy::kGroupsPerTile / kPartitionsK)) { k_group_idx_ = 0; add_tile_offset({Policy::kGroupsPerTile, 0}); } return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &operator--() { assert(0); } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr = reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsmIterations::kContiguous; AccessType const *source_ptr = pointer_ + Policy::LdsmShape::kContiguous * c + Policy::kLdsmOpInner / Layout::kFactor * Policy::LdsmShape::kStrided * s * stride_; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; cutlass::arch::ldsm<layout::RowMajor, Policy::LdsmShape::kCount>( fetch_ptr[access_idx], source_byte_ptr); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
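  /// Added note (illustrative, not from the original file): unlike the
  /// congruous specializations above, the computation below scales the
  /// contiguous tile offset by InstructionShape::kContiguous rather than
  /// Shape::kContiguous, which suggests the contiguous coordinate is
  /// interpreted in instruction-k units for crosswise layouts.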
CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * InstructionShape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * Shape::kStrided * stride_; byte_offset += sizeof_bits<AccessType>::value * pointer_offset / 8; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { k_group_idx_ = k_group % (Policy::kGroupsPerTile / kPartitionsK); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Element number when the layout crosses (in units of elements) int Crosswise, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kB, "MmaTensorOpMultiplicandIterator for ColumnMajor Crosswise may " "only be instantiated for B operand to warp-level Mma."); /// Element type using Element = Element_; /// KBlock size static int const kCrosswise = Crosswise; /// Layout of source tile using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, kCrosswise>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element, layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, kCrosswise>, layout::PitchLinearShape<InstructionShape::kRow, 
InstructionShape::kColumn>, kOpDelta, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) : iterator_({ref.data(), ref.stride()}, lane_id) {} /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset( TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative( TensorCoord const &tile_offset) { iterator_.add_tile_offset_negative({tile_offset.row(), tile_offset.column()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Element number when the layout crosses (in units of elements) int Crosswise, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA, "MmaTensorOpMultiplicandIterator for RowMajor Crosswise may " "only be instantiated for A operand to warp-level Mma."); /// Element type using Element = Element_; /// Element number when the layout crosses static int const kCrosswise = Crosswise; /// Layout of source tile using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, kCrosswise>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaTensorOpMultiplicandTileIterator< 
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element, layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, kCrosswise>, layout::PitchLinearShape<InstructionShape::kColumn, InstructionShape::kRow>, kOpDelta, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) : iterator_({ref.data(), ref.stride()}, lane_id) {} /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset( TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative( TensorCoord const &tile_offset) { iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Layout of operand in memory typename Layout_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_> class MmaTensorOpAccumulatorTileIterator; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store /// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major /// accumulator layout. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept | /// WriteableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_> class MmaTensorOpAccumulatorTileIterator< Shape_, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static bool const kDivisible = !(Shape::kRow % InstructionShape::kM) && !(Shape::kColumn % InstructionShape::kN); static_assert(platform::is_same<TensorCoord, MatrixCoord>::value, "Layouts must be defined for logical MatrixCoord coordinate space."); /// Number of mma operations performed using MmaIterations = MatrixShape< (Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM, (Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN >; }; private: // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements // of that row. The accumulators within one row are assumed to be consecutive. 
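  // Illustrative note (added; not in the original source, assuming InstructionShape =
  // GemmShape<16, 8, 8>): the constants below evaluate to kElementsPerAccess = 8 / 4 = 2,
  // kRowsPerTile = 8, and kAccumulatorRows = 16 / 8 = 2. In the constructor, lane 5 maps to
  // quad = 1 and lane_in_quad = 1, giving lane_offset = MatrixCoord(1, 2); that lane therefore
  // owns accumulator elements (1,2), (1,3), (9,2), (9,3) of each 16x8 instruction tile,
  // matching the accum_m / accum_n indexing used by load_with_pointer_offset() below.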
static int const kElementsPerAccess = InstructionShape::kN / 4; static int const kRowsPerTile = 8; static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array< Element, Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>; private: /// Reference to output tensor TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator++() { // deliberate no-op return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator--() { // deliberate no-op return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to load from the tensor Index pointer_offset) const { ///< loads a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; frag[mma_accum_start + row * kElementsPerAccess + col] = offset_ref.at({accum_m, accum_n}); } } } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( Fragment &frag, ///< fragment to load from the tensor Index byte_offset) const { ///< loads a tile with a linear offset load_with_pointer_offset(byte_offset / sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles load(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } /// Stores a fragment to memory CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, ///< fragment to store from the tensor Index pointer_offset) const { ///< store a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; int idx = mma_accum_start + row * kElementsPerAccess + col; offset_ref.at({accum_m, accum_n}) = frag[idx]; } } } } } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_byte_offset( Fragment const &frag, ///< fragment to store from the tensor Index byte_offset) const { ///< store a tile with a linear offset store_with_pointer_offset(byte_offset / sizeof(Element)); } /// Stores a 
fragment to memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( Fragment &frag, ///< fragment to store to the tensor TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles store(frag, tile_offset, 0); } /// Stores a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( /// fragment to store to the tensor Fragment const &frag, /// stores a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// stores a tile with a logical offset AND a pointer offset Index pointer_offset) const { store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store /// accumulators from memory and is agnostic to layout. /// /// This iterator is not tested. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept | /// WriteableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_> class MmaTensorOpAccumulatorTileIterator< Shape_, Element_, cutlass::layout::AffineRankN<2>, InstructionShape_, OpDelta_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static bool const kDivisible = !(Shape::kRow % InstructionShape::kM) && !(Shape::kColumn % InstructionShape::kN); static_assert(platform::is_same<TensorCoord, MatrixCoord>::value, "Layouts must be defined for logical MatrixCoord coordinate space."); /// Number of mma operations performed using MmaIterations = MatrixShape< (Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM, (Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN >; }; private: // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements // of that row. The accumulators within one row are assumed to be consecutive. 
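  // Illustrative note (added; not in the original source): the Fragment defined a few lines
  // below holds MmaIterations::kCount * InstructionShape::kMN / kThreads elements per thread.
  // For example, a 64x64 accumulator tile with InstructionShape = GemmShape<16, 8, 8> gives
  // (4 * 8) * (16 * 8) / 32 = 128 elements per thread, i.e. the full 64x64 tile divided evenly
  // across the 32 threads of the warp.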
static int const kElementsPerAccess = InstructionShape::kN / 4; static int const kRowsPerTile = 8; static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array< Element, Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>; private: /// Reference to output tensor TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator++() { // deliberate no-op return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator--() { // deliberate no-op return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to load from the tensor Index pointer_offset) const { ///< loads a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; frag[mma_accum_start + row * kElementsPerAccess + col] = offset_ref.at({accum_m, accum_n}); } } } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( Fragment &frag, ///< fragment to load from the tensor Index byte_offset) const { ///< loads a tile with a linear offset load_with_pointer_offset(byte_offset / sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles load(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } /// Stores a fragment to memory CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, ///< fragment to store from the tensor Index pointer_offset) const { ///< store a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; int idx = mma_accum_start + row * kElementsPerAccess + col; offset_ref.at({accum_m, accum_n}) = frag[idx]; } } } } } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_byte_offset( Fragment const &frag, ///< fragment to store from the tensor Index byte_offset) const { ///< store a tile with a linear offset store_with_pointer_offset(byte_offset / sizeof(Element)); } /// Stores a 
fragment to memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( Fragment &frag, ///< fragment to store to the tensor TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles store(frag, tile_offset, 0); } /// Stores a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( /// fragment to store to the tensor Fragment const &frag, /// stores a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// stores a tile with a logical offset AND a pointer offset Index pointer_offset) const { store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store /// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major /// accumulator layout. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept | /// WriteableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_> class MmaTensorOpAccumulatorTileIterator<Shape_, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::ColumnMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static bool const kDivisible = !(Shape::kRow % InstructionShape::kM) && !(Shape::kColumn % InstructionShape::kN); static_assert(platform::is_same<TensorCoord, MatrixCoord>::value, "Layouts must be defined for logical MatrixCoord coordinate space."); /// Number of mma operations performed using MmaIterations = MatrixShape< (Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM, (Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN >; }; private: // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements // of that row. The accumulators within one row are assumed to be consecutive. 
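  // Illustrative note (added; not in the original source): although this specialization targets
  // a ColumnMajor accumulator tensor, the loops below address elements logically through
  // offset_ref.at({accum_m, accum_n}), so the per-thread ownership pattern is identical to the
  // RowMajor specialization above; only the layout object changes the address computation.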
static int const kElementsPerAccess = InstructionShape::kN / 4; static int const kRowsPerTile = 8; static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>; private: /// Reference to output tensor TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator++() { // deliberate no-op return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator--() { // deliberate no-op return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to load from the tensor Index pointer_offset) const { ///< loads a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; int idx = mma_accum_start + row * kElementsPerAccess + col; frag[idx] = offset_ref.at({accum_m, accum_n}); } } } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( Fragment &frag, ///< fragment to load from the tensor Index byte_offset) const { ///< loads a tile with a linear offset load_with_pointer_offset(byte_offset / sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles load(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } /// Stores a fragment to memory CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, ///< fragment to store from the tensor Index pointer_offset) const { ///< store a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; int idx = mma_accum_start + row * kElementsPerAccess + col; offset_ref.at({accum_m, accum_n}) = frag[idx]; } } } } } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_byte_offset( Fragment const &frag, ///< fragment to store from the tensor Index byte_offset) const { ///< store a tile with a linear offset store_with_pointer_offset(byte_offset / sizeof(Element)); } 
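  // Note (added comment, not in the original source): the load_with_byte_offset() and
  // store_with_byte_offset() overloads in these accumulator specializations forward only the
  // scaled offset, e.g. store_with_pointer_offset(byte_offset / sizeof(Element)), and omit the
  // Fragment argument expected by store_with_pointer_offset(Fragment const &, Index). They
  // appear to be unused; a call would presumably need to forward the fragment as well, e.g.
  // store_with_pointer_offset(frag, byte_offset / sizeof(Element)).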
/// Stores a fragment to memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( Fragment &frag, ///< fragment to store to the tensor TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles store(frag, tile_offset, 0); } /// Stores a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( /// fragment to store to the tensor Fragment const &frag, /// stores a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// stores a tile with a logical offset AND a pointer offset Index pointer_offset) const { store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store /// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major /// accumulator layout. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept | /// WriteableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element typ typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_, /// Interleaved N int InterleavedN> class MmaTensorOpAccumulatorTileIterator< Shape_, Element_, cutlass::layout::ColumnMajorInterleaved<InterleavedN>, InstructionShape_, OpDelta_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::ColumnMajorInterleaved<InterleavedN>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kRow % InstructionShape::kM) && !(Shape::kColumn % InstructionShape::kN), "Shape of warp-level Mma must be divisible by operator shape."); static_assert(platform::is_same<TensorCoord, MatrixCoord>::value, "Layouts must be defined for logical MatrixCoord coordinate space."); /// Number of mma operations performed using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM, Shape::kColumn / InstructionShape::kN>; }; private: static int const kElementsPerAccess = 2; public: // // Derived quantities // using AccessType = Array<Element, kElementsPerAccess>; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kCount / kThreads>; private: /// Reference to output tensor TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator() { } /// Constructor from TensorRef 
CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator++() { // deliberate no-op return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator--() { // deliberate no-op return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to load from the tensor Index pointer_offset) const { ///< loads a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); AccessType* frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int accum_m = mma_m * InstructionShape::kM; int accum_n = mma_n * InstructionShape::kN; int idx = mma_m + mma_n * Policy::MmaIterations::kRow; AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() + offset_ref.offset(TensorCoord(accum_m, accum_n))); frag_ptr[idx] = access_ptr[0]; } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( Fragment &frag, ///< fragment to load from the tensor Index byte_offset) const { ///< loads a tile with a linear offset load_with_pointer_offset(byte_offset / sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles load(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } /// Stores a fragment to memory CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, ///< fragment to store from the tensor Index pointer_offset) const { ///< store a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int accum_m = mma_m * InstructionShape::kM; int accum_n = mma_n * InstructionShape::kN; int idx = mma_m + mma_n * Policy::MmaIterations::kRow; AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() + offset_ref.offset(TensorCoord(accum_m, accum_n))); access_ptr[0] = frag_ptr[idx]; } } } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_byte_offset( Fragment const &frag, ///< fragment to store from the tensor Index byte_offset) const { ///< store a tile with a linear offset store_with_pointer_offset(byte_offset / sizeof(Element)); } /// Stores a fragment to memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( Fragment &frag, ///< fragment to store to the tensor TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles store(frag, tile_offset, 0); } /// Stores a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( /// fragment to store to the tensor Fragment const &frag, /// stores a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// stores a tile with a logical offset AND a pointer offset Index pointer_offset) const { store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store /// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major /// accumulator layout. 
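/// (Added note, inferred from the implementation below:) unlike the specializations above, this
/// iterator also carries the output extent plus alpha/beta scale factors, and it fuses
/// alpha * accumulator + beta * source scaling into store_with_pointer_offset() when writing to a
/// TensorNCxHWx<InterleavedN> tensor.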
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept | /// WriteableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element typ typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_, /// Interleaved N int InterleavedN> class MmaTensorOpAccumulatorTileIterator< Shape_, Element_, cutlass::layout::TensorNCxHWx<InterleavedN>, InstructionShape_, OpDelta_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = int8_t; /// Layout of source tile using Layout = cutlass::layout::TensorNCxHWx<InterleavedN>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kRow % InstructionShape::kM) && !(Shape::kColumn % InstructionShape::kN), "Shape of warp-level Mma must be divisible by operator shape."); /// Number of elements in strided dimension that each STG writes static int const kStridedPerSTG = 8; /// Factor to calculate reorder index to pack accumulator. 
static int const kPackedFactor = Shape::kColumn / 32; /// Number of mma operations performed using MmaIterations = MatrixShape<Shape::kRow / kStridedPerSTG, Shape::kColumn / InterleavedN>; }; private: static int const kElementsPerAccess = InterleavedN / 4; public: // // Derived quantities // struct alignas((kElementsPerAccess * sizeof_bits<Element>::value / 8)) AccessType { Array<Element, kElementsPerAccess> storage; }; /// Fragment object holding a thread's part of a tile using Fragment = Array<int32_t, Shape::kCount / kThreads>; private: /// Reference to output tensor TensorRef ref_; /// Row offset index globally LongIndex global_offset_row_; /// Column offset index globally LongIndex global_offset_col_; /// Output tensor size TensorCoord extent_; /// Alpha float alpha_; /// Beta float beta_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator( TensorRef const &ref, int const lane_id, TensorCoord extent, float alpha = 1.0f, float beta = 0.0f ): ref_(ref), extent_(extent), alpha_(alpha), beta_(beta) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); global_offset_row_ = quad; global_offset_col_ = lane_in_quad * kElementsPerAccess; } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_tile_offset(MatrixCoord const &tile_offset) { global_offset_row_ += tile_offset.row() * Shape::kRow; global_offset_col_ += tile_offset.column() * Shape::kColumn; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator++() { // deliberate no-op return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator--() { // deliberate no-op return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to load from the tensor Index pointer_offset) const { ///< loads a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); AccessType* frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kN; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kM; ++mma_m) { int accum_m = mma_m * InstructionShape::kM; int accum_n = mma_n * InstructionShape::kN; int idx = mma_m + mma_n * Policy::MmaIterations::kM; AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() + accum_m * offset_ref.stride(0) + accum_n); frag_ptr[idx] = access_ptr[0]; } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( Fragment &frag, ///< fragment to load from the tensor Index byte_offset) const { ///< loads a tile with a linear offset load_with_pointer_offset(byte_offset / sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles load(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } /// Stores a fragment to memory CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, ///< fragment to store from the tensor Index pointer_offset) const { ///< store a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); Array<float, Shape::kCount / kThreads> output_frag_f; Array<Element, Shape::kCount / kThreads> output_frag; LongIndex pq = extent_.h() * extent_.w(); LongIndex extent_row = extent_.n() * pq; LongIndex extent_col = extent_.c(); LongIndex k_major = (global_offset_col_ / InterleavedN) * pq; Index k_minor = global_offset_col_ % InterleavedN; LongIndex k_offset = k_major * InterleavedN + k_minor; LongIndex k_offset_delta = pq * InterleavedN; LongIndex stride_n = pq * extent_.c(); Index n; LongIndex pq_rem; unsigned int pq_mul, pq_shr; find_divisor(pq_mul, pq_shr, pq); if(beta_ == 0.0f) { CUTLASS_PRAGMA_UNROLL for(int i = 0; i < int(frag.size()); ++i) { output_frag_f[i] = frag[i]; } if(InstructionShape::kM == Policy::kStridedPerSTG) { CUTLASS_PRAGMA_UNROLL for(int i = 0; i < int(frag.size()); ++i) { output_frag[i] = (Element)(output_frag_f[i] * alpha_); } } else { CUTLASS_PRAGMA_UNROLL for(int i = 0; i < int(frag.size()); ++i) { int map_i = (i / (16 * Policy::kPackedFactor)) * (16 * Policy::kPackedFactor) + (i % (8 * Policy::kPackedFactor)) / 2 * 4 + (i % (8 * Policy::kPackedFactor)) % 2 + (i / (8 * Policy::kPackedFactor)) % 2 * 2; output_frag[i] = 
(Element)(output_frag_f[map_i] * alpha_); } } AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&output_frag); CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int accum_m = mma_m * Policy::kStridedPerSTG; fast_divmod(n, pq_rem, global_offset_row_ + accum_m, pq, pq_mul, pq_shr); LongIndex offset_m = n * stride_n + k_offset + pq_rem * InterleavedN; CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { int accum_n = mma_n * InterleavedN; int idx = mma_n + mma_m * Policy::MmaIterations::kColumn; if((global_offset_row_ + accum_m < extent_row) && (global_offset_col_ + accum_n < extent_col)) { AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() + offset_m + mma_n * k_offset_delta); access_ptr[0] = frag_ptr[idx]; } } } } else { if(InstructionShape::kM == Policy::kStridedPerSTG) { CUTLASS_PRAGMA_UNROLL for(int i = 0; i < int(frag.size()); ++i) { output_frag_f[i] = frag[i]; } } else { CUTLASS_PRAGMA_UNROLL for(int i = 0; i < int(frag.size()); ++i) { int map_i = (i / (16 * Policy::kPackedFactor)) * (16 * Policy::kPackedFactor) + (i % (8 * Policy::kPackedFactor)) / 2 * 4 + (i % (8 * Policy::kPackedFactor)) % 2 + (i / (8 * Policy::kPackedFactor)) % 2 * 2; output_frag_f[i] = frag[map_i]; } } AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&output_frag); Array<Element, kElementsPerAccess> ref_frag; AccessType *ref_frag_ptr = reinterpret_cast<AccessType *>(&ref_frag); CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int accum_m = mma_m * Policy::kStridedPerSTG; fast_divmod(n, pq_rem, global_offset_row_ + accum_m, pq, pq_mul, pq_shr); LongIndex offset_m = n * stride_n + k_offset + pq_rem * InterleavedN; CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { int accum_n = mma_n * InterleavedN; int idx = mma_n + mma_m * Policy::MmaIterations::kColumn; if((global_offset_row_ + accum_m < extent_row) && (global_offset_col_ + accum_n < extent_col)) { AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() + offset_m + mma_n * k_offset_delta); ref_frag_ptr[0] = access_ptr[0]; CUTLASS_PRAGMA_UNROLL for(int i = 0; i < kElementsPerAccess; ++i) { output_frag[idx * kElementsPerAccess + i] = Element(alpha_ * output_frag_f[idx * kElementsPerAccess + i] + beta_ * ref_frag[i]); } access_ptr[0] = frag_ptr[idx]; } } } } } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_byte_offset( Fragment const &frag, ///< fragment to store from the tensor Index byte_offset) const { ///< store a tile with a linear offset store_with_pointer_offset(byte_offset / sizeof(Element)); } /// Stores a fragment to memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( Fragment &frag, ///< fragment to store to the tensor TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles store(frag, tile_offset, 0); } /// Stores a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void store( /// fragment to store to the tensor Fragment const &frag, /// stores a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// stores a tile with a logical offset AND a pointer offset Index pointer_offset) const { store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h", "repo_id": "cutlass", "token_count": 59772 }
43
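A note on the store path of the interleaved tile iterator in `mma_tensor_op_tile_iterator.h` above: it splits into a `beta_ == 0` fast path that only scales the accumulators by `alpha_`, and a read-modify-write path that loads the existing output and blends it in. The standalone C++ sketch below illustrates only that per-element blend arithmetic; the fragment length, the function names, and the use of `float` throughout are illustrative assumptions, not the CUTLASS iterator itself.

```c++
#include <array>
#include <cstdio>

// Illustrative fragment size; real fragments are sized by the warp-level policy.
constexpr int kElements = 8;

// Blend accumulators with the existing output, mirroring the two branches of
// store_with_pointer_offset(): beta == 0 skips the global-memory read entirely.
void blend(std::array<float, kElements> const &accum,
           std::array<float, kElements> &output,   // stands in for global memory
           float alpha, float beta) {
  if (beta == 0.0f) {
    for (int i = 0; i < kElements; ++i) {
      output[i] = alpha * accum[i];                      // fast path: scale only
    }
  } else {
    for (int i = 0; i < kElements; ++i) {
      output[i] = alpha * accum[i] + beta * output[i];   // read-modify-write path
    }
  }
}

int main() {
  std::array<float, kElements> accum{};
  std::array<float, kElements> out{};
  accum.fill(1.0f);
  out.fill(2.0f);
  blend(accum, out, /*alpha=*/1.5f, /*beta=*/0.5f);
  std::printf("%f\n", out[0]);  // 1.5 * 1.0 + 0.5 * 2.0 = 2.5
  return 0;
}
```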
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Kernel performing a reduction over densely packed tensors in global memory */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/tensor_ref.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace reduction { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Mixed-precision reduction template < typename ElementAccumulator_, typename Element_, int Count = 1 > struct ReduceAdd { // // Type definitions // using ElementAccumulator = ElementAccumulator_; using Element = Element_; static int const kCount = Count; using FragmentAccumulator = cutlass::Array<ElementAccumulator, kCount>; using FragmentElement = cutlass::Array<Element, kCount>; struct Params { }; // // Data members // /// Parameters object Params params; // // Methods // /// Constructor CUTLASS_HOST_DEVICE ReduceAdd(Params params_ = Params()): params(params_) { } /// Operator CUTLASS_HOST_DEVICE FragmentAccumulator operator()( FragmentAccumulator accumulator, FragmentElement element) const { plus<FragmentAccumulator> op; NumericArrayConverter< ElementAccumulator, Element, kCount, PreferredRoundingMode<ElementAccumulator, Element>::kRound> converter; return op(accumulator, converter(element)); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { /// Special handling for binary operators template <typename ReductionOp, typename Element, int N> struct VectorizeArrayOperation { using ValueType = Array<Element, N>; CUTLASS_HOST_DEVICE ValueType operator()( ReductionOp const &reduction_op, ValueType const &lhs, ValueType const &rhs) const { ValueType result; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = reduction_op(lhs[i], rhs[i]); } return result; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename ReductionOp, typename Element, int N> struct ReduceArrayOperation { using ArrayType = Array<Element, N>; CUTLASS_HOST_DEVICE Element operator()( ReductionOp const &reduction_op, ArrayType const &array) const { Element item = reduction_op(array[0], array[1]); CUTLASS_PRAGMA_UNROLL for (int i = 2; i < N; ++i) { item = reduction_op(item, array[i]); } return item; } }; template <int N> struct ReduceArrayOperation<logical_and<uint1b_t>, uint1b_t, N> { using ArrayType = Array<uint1b_t, N>; CUTLASS_HOST_DEVICE uint1b_t operator()( logical_and<uint1b_t> const &reduction_op, ArrayType const &array) const { uint8_t const *ptr = reinterpret_cast<uint8_t const *>(&array); bool item = false; CUTLASS_PRAGMA_UNROLL for (int byte = 0; byte < (N + 7) / 8; ++byte) { uint8_t bits = ptr[byte]; item = (item || !bits); } return uint1b_t(!item); } }; template <int N> struct ReduceArrayOperation<logical_or<uint1b_t>, uint1b_t, N> { using ArrayType = Array<uint1b_t, N>; CUTLASS_HOST_DEVICE uint1b_t operator()( logical_and<uint1b_t> const &reduction_op, ArrayType const &array) const { uint8_t const *ptr = reinterpret_cast<uint8_t const *>(&array); bool item = true; CUTLASS_PRAGMA_UNROLL for (int byte = 0; byte < (N + 7) / 8; ++byte) { uint8_t bits = ptr[byte]; item = (item || bits); } return uint1b_t(item); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper 
function to infer template argument types template <typename ReductionOp, typename Element, int N> CUTLASS_HOST_DEVICE Array<Element, N> ApplyArrayOperator( ReductionOp const &reduction_op, Array<Element, N> const &lhs, Array<Element, N> const &rhs) { VectorizeArrayOperation<ReductionOp, Element, N> vectorize_op; return vectorize_op(reduction_op, lhs, rhs); } /// Helper to reduce an array template <typename ReductionOp, typename Element, int N> Element ReduceArray(ReductionOp const &reduction_op, Array<Element, N> const &array) { ReduceArrayOperation<ReductionOp, Element, N> reduce_array_op; return reduce_array_op(reduction_op, array); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace reduction } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/reduction/thread/reduction_operators.h/0
{ "file_path": "cutlass/include/cutlass/reduction/thread/reduction_operators.h", "repo_id": "cutlass", "token_count": 2057 }
44
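To make the semantics of `reduction_operators.h` above concrete, the following standalone C++ sketch mimics `ReduceAdd` (convert each element to the accumulator type, then add element-wise) and `ReduceArrayOperation` (fold an array to a single value with a binary functor). The `float`/`double` types, the array length, and the function names are assumptions for illustration; the real code operates on `cutlass::Array` fragments with hardware-aware converters.

```c++
#include <array>
#include <cstdio>
#include <functional>

constexpr int kCount = 4;

// Mirrors ReduceAdd: widen each element to the accumulator type, then add.
std::array<double, kCount> reduce_add(std::array<double, kCount> accumulator,
                                      std::array<float, kCount> const &element) {
  for (int i = 0; i < kCount; ++i) {
    accumulator[i] += static_cast<double>(element[i]);
  }
  return accumulator;
}

// Mirrors ReduceArrayOperation: fold an array to a scalar with a binary functor.
template <typename Op>
double reduce_array(Op const &op, std::array<double, kCount> const &array) {
  double item = op(array[0], array[1]);
  for (int i = 2; i < kCount; ++i) {
    item = op(item, array[i]);
  }
  return item;
}

int main() {
  std::array<double, kCount> acc{};                 // starts at zero
  std::array<float, kCount> frag{1.f, 2.f, 3.f, 4.f};
  acc = reduce_add(acc, frag);
  double total = reduce_array(std::plus<double>{}, acc);
  std::printf("total = %f\n", total);               // 10.0
  return 0;
}
```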
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/complex.h" namespace cutlass { namespace transform { namespace thread { namespace UnaryTransform { struct Identity; ///< None (i.e., identity) struct Conjugate; ///< Complex conjugate } /// Element-wise unary operator that transforms one element of a fragment at a time template< typename FragmentIn, ///< Input Fragment typename FragmentOut,///< Output Fragment typename Transform> ///< Unary transform operator class UnaryOp { public: CUTLASS_DEVICE static FragmentOut execute(FragmentIn &in) { static_assert(FragmentIn::kElements == FragmentOut::kElements, "Number of elements must match."); static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value || platform::is_same<Transform, UnaryTransform::Conjugate>::value, "Unary Operator not supported."); FragmentOut out; if (platform::is_same<Transform, UnaryTransform::Identity>::value ) { CUTLASS_PRAGMA_UNROLL for (int i=0; i < FragmentIn::kElements; ++i){ out[i] = static_cast<typename FragmentOut::Element>(in[i]); } } else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value ) { for (int i=0; i < FragmentIn::kElements; ++i){ out[i] = conj(static_cast<typename FragmentOut::Element>(in[i])); } } return out; } }; template<typename FragmentIn, typename Transform> class UnaryOp<FragmentIn, FragmentIn, Transform> { public: CUTLASS_DEVICE static FragmentIn execute(FragmentIn &in) { static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value || platform::is_same<Transform, UnaryTransform::Conjugate>::value, "Unary Operator not supported."); if (platform::is_same<Transform, UnaryTransform::Identity>::value ) { return in; } else if (platform::is_same<Transform, 
UnaryTransform::Conjugate>::value ) { for(int i=0; i < FragmentIn::kElements; ++i){ in[i] = conj(in[i]); } } return in; } }; } } }
cutlass/include/cutlass/transform/thread/unary_op.h/0
{ "file_path": "cutlass/include/cutlass/transform/thread/unary_op.h", "repo_id": "cutlass", "token_count": 1663 }
45
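The `UnaryOp` in `unary_op.h` above either passes a fragment through unchanged (`Identity`) or conjugates each element (`Conjugate`). A minimal standalone analog over `std::complex<float>` is sketched below; the fragment length and the `if constexpr` dispatch are illustrative assumptions, whereas the real class operates on CUTLASS fragment types and calls `conj`.

```c++
#include <array>
#include <complex>
#include <cstdio>
#include <type_traits>

constexpr int kElements = 4;
using Fragment = std::array<std::complex<float>, kElements>;

struct Identity {};
struct Conjugate {};

// Apply the transform element-wise, mirroring UnaryOp<FragmentIn, FragmentOut, Transform>.
template <typename Transform>
Fragment unary_op(Fragment in) {
  if constexpr (std::is_same_v<Transform, Conjugate>) {
    for (auto &x : in) {
      x = std::conj(x);
    }
  }
  return in;  // Identity returns the input unchanged
}

int main() {
  Fragment frag = {{ {1.f, 2.f}, {3.f, -4.f}, {0.f, 1.f}, {5.f, 0.f} }};
  Fragment conj = unary_op<Conjugate>(frag);
  std::printf("(%f, %f)\n", conj[0].real(), conj[0].imag());  // (1, -2)
  return 0;
}
```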
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing computing the addresses of storing of tiles from pitch-linear rank=2 tensors. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_coord.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for congruous arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment> class RegularTileAccessIterator< Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Element type per access using AccessType = Array<Element, ThreadMap::kElementsPerAccess>; private: // // Data members // /// Stride value StrideIndex stride_; /// Internal pointer to first access of tile AccessType *pointer_; /// Internal byte offset Index byte_offset_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : stride_(ref.stride(0) / ThreadMap::kElementsPerAccess), byte_offset_(0) { layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); // initialize pointer pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base)); set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_offset_ += pointer_offset * sizeof(Element); } /// Returns a pointer CUTLASS_DEVICE AccessType *get() const { AccessType *access_ptr = pointer_; int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess; char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset); return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_); } /// Advances to the next tile in memory. 
CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) return *this; // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. iteration_strided_ = 0; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset in the unit of tile. /// In GEMM/Conv implementation, this is used to move in the k dimension in the shared memory. /// Below layouts are the shared memory layouts. Current SM50 SIMT kernels only use col major A and row major B. /// For row major A operand, k dimension is contiguous dimension; /// For col major A operand, k dimension is strided dimension; /// For row major B operand, k dimension is strided dimension; /// For col major B operand, k dimension is contiguous dimension. /// Below two classes map col/row major to the pitch linear coordinates used /// in this base class. CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { add_pointer_offset(coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_ * ThreadMap::kElementsPerAccess); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for column major layouts /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment> class RegularTileAccessIterator< Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileAccessIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap_>; using AccessType = typename UnderlyingIterator::AccessType; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); ++iterator_; return prev; } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for row major layouts /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment> class RegularTileAccessIterator< Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap_>; using AccessType = typename UnderlyingIterator::AccessType; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); ++iterator_; return prev; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h/0
{ "file_path": "cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h", "repo_id": "cutlass", "token_count": 4230 }
46
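The pitch-linear `RegularTileAccessIterator::get()` above computes the address of each access from the contiguous and strided iteration indices, and `add_tile_offset()` advances by whole tiles. The sketch below reproduces just that index arithmetic with plain integers; all of the thread-map and tile-shape constants are assumed values chosen for illustration, not taken from a real `ThreadMap`.

```c++
#include <cstdio>

// Illustrative thread-map constants (assumed, not from a real ThreadMap).
constexpr int kElementsPerAccess = 8;
constexpr int kDeltaContiguous   = 64;   // elements between contiguous accesses
constexpr int kDeltaStrided      = 8;    // rows between strided accesses
constexpr int kShapeContiguous   = 128;  // tile extent, contiguous dimension
constexpr int kShapeStrided      = 32;   // tile extent, strided dimension

// Offset of one access, in units of AccessType, mirroring get(): the strided
// iteration moves by whole rows (stride is expressed in AccessType units) and
// the contiguous iteration moves within a row.
long access_offset(int iter_contiguous, int iter_strided, long stride_in_accesses) {
  return iter_strided * kDeltaStrided * stride_in_accesses +
         iter_contiguous * kDeltaContiguous / kElementsPerAccess;
}

// Element-unit offset produced by add_tile_offset({c, s}) for a whole-tile move.
long tile_offset(int tile_c, int tile_s, long stride_in_accesses) {
  return tile_c * kShapeContiguous +
         tile_s * kShapeStrided * stride_in_accesses * kElementsPerAccess;
}

int main() {
  long stride = 256 / kElementsPerAccess;  // e.g., a 256-element leading dimension
  std::printf("access offset: %ld\n", access_offset(1, 2, stride));
  std::printf("tile offset:   %ld\n", tile_offset(0, 1, stride));
  return 0;
}
```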
[README](../../README.md#documentation) > **CUTLASS 3: Building with Clang as host compiler** # Building with Clang as host compiler CUTLASS 3.2(.1) reintroduces support for building with Clang as host compiler, and NVCC as device compiler. This is NOT the same as building with Clang as both host and device compiler ("CUDA Clang"). # Software prerequisites 1. Clang (regularly tested with Clang 14; occasionally tested with Clang 10 and greater) 2. CUDA Toolkit (tested with 12.2; other versions likely work) 3. CMake (at least 3.18) 4. git 5. Python (at least 3.6) Experience with Ubuntu 22.04 LTS is that clang requires the following packages to be installed. ```bash $ sudo apt-get install clang cmake ninja-build pkg-config libgtk-3-dev liblzma-dev libstdc++-12-dev ``` A symptom of not installing all needed dependencies is the following error when attempting to use clang: `"/usr/bin/ld: cannot find -lstdc++: No such file or directory"`. # Running CMake ## Required CMake options The Clang build requires specifying the following CMake options. Replace `<path-to-clang++>` with the path to your `clang++` executable. You may use `clang++` directly if it is in your `PATH`. * `CMAKE_CXX_COMPILER=<path-to-clang++>` * `CMAKE_CUDA_HOST_COMPILER=<path-to-clang++>` One must set both! It's not enough just to set the `CXX` environment variable, for example. Symptoms of only setting `CMAKE_CXX_COMPILER` (or only setting the `CXX` environment variable) include `cc1plus` (GCC's compiler executable) reporting build errors due to it not understanding Clang's command-line options. Users can also specify a particular CUDA Toolkit version by setting the CMake option `CMAKE_CUDA_COMPILER` to the path to the `nvcc` executable that lives in the CUDA Toolkit's directory. For example, if `${PATH_TO_CUDA_TOOLKIT}` is the CUDA Toolkit directory, then one can set `CMAKE_CUDA_COMPILER` as follows. * `CMAKE_CUDA_COMPILER=${PATH_TO_CUDA_TOOLKIT}/bin/nvcc`
cutlass/media/docs/build/building_with_clang_as_host_compiler.md/0
{ "file_path": "cutlass/media/docs/build/building_with_clang_as_host_compiler.md", "repo_id": "cutlass", "token_count": 654 }
47
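Putting the required options from the build instructions above into a single configure step might look like the following; the build directory layout and `${PATH_TO_CUDA_TOOLKIT}` are placeholders for your local setup.

```bash
# From a build directory inside the CUTLASS checkout (paths are illustrative).
cmake .. \
  -DCMAKE_CXX_COMPILER=clang++ \
  -DCMAKE_CUDA_HOST_COMPILER=clang++ \
  -DCMAKE_CUDA_COMPILER=${PATH_TO_CUDA_TOOLKIT}/bin/nvcc
```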
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS")

[README](../../README.md#documentation) > **Fundamental Types**

# Fundamental Types

CUTLASS defines several fundamental numeric and container classes upon which computations and
algorithms for linear algebra are implemented.

Where possible, CUTLASS fundamental types mirror the C++ Standard Library. However, there are
circumstances that necessitate divergence from the Standard Library's specification. In such cases,
the CUTLASS implementation adopts distinct capitalization to signal that standard vocabulary types
may not be safely substituted in all cases.

Most types in CUTLASS are usable in both host code and device code. Moreover, they are functional
regardless of compute capability, but they may only be efficient when hardware support is present.

## Numeric Types

CUTLASS defines classes for the following numeric data types.

* `half_t`: IEEE half-precision floating point (exponent: 5b, mantissa: 10b; literal suffix `_hf`)
* `bfloat16_t`: BFloat16 data type (exponent: 8b, mantissa: 7b; literal suffix `_bf16`)
* `tfloat32_t`: Tensor Float 32 data type (exponent: 8b, mantissa: 10b; literal suffix `_tf32`)
* `int4_t`, `uint4_t`: 4b signed and unsigned integer (literal suffix `_s4`, `_u4`)
* `bin1_t`: 1b binary numeric type (literal suffix `_b1`)
* `complex<T>`: defines a complex-valued data type based on the supplied real-valued numeric type

Numeric types in CUTLASS may be used in both host and device code and are intended to function
like any other plain-old-data type.

If CUTLASS is compiled with `CUTLASS_F16C_ENABLED`, then hardware conversion is used for
half-precision types in host code. Regardless, `cutlass::half_t` uses the most efficient NVIDIA GPU
hardware instructions available in device code.

Example:
```c++
#include <iostream>
#include <cutlass/numeric_types.h>

__global__ void kernel(cutlass::half_t x) {
  printf("Device: %f\n", float(x * 2.0_hf));
}

int main() {

  cutlass::half_t x = 0.5_hf;
  std::cin >> x;

  std::cout << "Host: " << 2.0_hf * x << std::endl;

  kernel<<< dim3(1,1), dim3(1,1,1) >>>(x);

  return 0;
}
```

## Containers

CUTLASS uses the following containers extensively for implementing efficient CUDA kernels.

### Array

```c++
template <
  typename T,   // element type
  int N         // number of elements
>
struct Array;
```

`Array<class T, int N>` defines a statically sized array of elements of type _T_ and size _N_.
This class is similar to
[`std::array<>`](https://en.cppreference.com/w/cpp/container/array) in the Standard Library with
one notable exception: partial specializations exist to pack or unpack elements smaller than one byte.

`Array<>` is intended to be a convenient and uniform container class to store arrays of numeric
elements regardless of data type or vector length. The storage needed is expected to be the minimum
necessary given the logical size of each numeric type in bits (numeric types smaller than one byte
are densely packed). Nevertheless, the size reported by `sizeof(Array<T, N>)` is always an integer
multiple of bytes.

Storing numeric elements in a C++ STL-style container class enables useful modern C++ mechanisms
such as range-based for loops. For example, to print the elements of `Array<>`, the following
range-based for loop syntax is always valid regardless of numeric data type, compute capability,
or context in host or device code.
Example:
```c++
int const kN = 8;
Array<T, kN> elements;

CUTLASS_PRAGMA_UNROLL                         // required to ensure array remains in registers
for (auto x : elements) {
  printf("%ld, %f", int64_t(x), double(x));   // explicitly convert to int64_t or double
}
```

When copying `Array<>` objects or passing them as arguments to methods, it is best to avoid
accessing individual elements. This enables the use of vector instructions to perform the operation
more efficiently. For example, setting all elements to zero is best performed by calling the
`clear()` method. Copies should be performed by assigning the entire object.

Example:
```c++
#include <cutlass/array.h>

int const kN = 8;
Array<T, kN> source;
Array<T, kN> destination;

source.clear();         // set all elements to value of zero

destination = source;   // copy to `destination`
```

`Array<>` may be used to store elements smaller than one byte such as 4b integers.
```c++
Array<int4b_t, 2> packed_integers;

static_assert(
  sizeof(packed_integers) == 1,
  "Packed storage of sub-byte data types is compact.");

// Access array elements using usual indirection and assignment operators
packed_integers[0] = 2_s4;
packed_integers[1] = 3_s4;

CUTLASS_PRAGMA_UNROLL
for (auto x : packed_integers) {
  printf("%d", int(x));   // access elements normally
}
```

### AlignedArray

```c++
template <
  typename T,       // element type
  int N,            // number of elements
  int Alignment     // alignment requirement in bytes
>
class AlignedArray;
```

`AlignedArray` is derived from `Array<T, N>` and supports an optional alignment field. Pointers to
objects of type `AlignedArray<>` reliably yield vectorized memory accesses when dereferenced.

Example:
```c++
int const kN = 8;

AlignedArray<half_t, kN> source;
AlignedArray<half_t, kN> const *ptr = ...;

source = *ptr;   // 128b aligned memory access
```

### AlignedBuffer

```c++
template <
  typename T,       // element type
  int N,            // number of elements
  int Alignment     // alignment requirement in bytes
>
class AlignedBuffer;
```

`AlignedBuffer` provides a uniform way to define aligned memory allocations for all data types.
This is particularly useful in defining allocations within shared memory with guaranteed memory
alignment needed for vectorized access. Note that constructors of the elements within
`AlignedBuffer<>` are not called, and so the elements are initially in an undefined state.

Use `AlignedBuffer<>::data()` to obtain a pointer to the first element of the buffer.

**Example:** Guaranteed aligned shared memory allocation. Note, shared memory contents are
uninitialized.
```c++
int const kN = 32;
int const kAlignment = 16;   // alignment in bytes

// Define a shared memory allocation in device code
__shared__ AlignedBuffer<complex<half_t>, kN, kAlignment> matrix_tile;

complex<half_t> *ptr = matrix_tile.data();   // ptr is guaranteed to have 128b (16 Byte) alignment
```

Note, `AlignedBuffer<>` only guarantees that its internal memory allocation, obtained via
`AlignedBuffer<>::data()`, is aligned. There is no guarantee that the `AlignedBuffer<>` object
itself satisfies alignment constraints or that its internal memory allocation is contiguous.
Device code performing vectorized memory accesses should use the `AlignedArray<>` type.

**_Example_:** Vectorized memory access to shared memory allocations.
```c++ int const kN = 1024; __shared__ AlignedBuffer<half_t, kN> smem_buffer; AlignedArray<half_t, 8> *ptr = reinterpret_cast<AlignedArray<half_t, 8> *>(smem_buffer.data()); AlignedArray<half_t, 8> x = ptr[threadIdx.x]; // 128b shared memory load ``` ### Numeric Conversion CUTLASS defines procedures for performing numeric conversion between data types in `cutlass/numeric_conversion.h`. Where possible, these target hardware acceleration on the target architecture and support multiple rounding modes. ```c++ #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" NumericConverter<half_t, float> convert_f32_to_f16; NumericConverter<tfloat32_t, float> convert_f32_to_tf32; half_t x = convert_f32_to_f16(3.14159f); tfloat32_t y = convert_f32_to_tf32(3.14159f); ``` Recent GPU architectures such as NVIDIA Turing and Ampere combine numeric conversion with efficient packing into bit vectors. Consequently, CUTLASS defines conversion on both scalars and `Array<>` objects to implement the optimal code sequence on all architectures. ```c++ // // Example: convert and pack 32b signed integers to a vector of packed signed 8-bit integers. // int const kN = 16; Array<int8_t, kN> destination; Array<int, kN> source; NumericConverter<descltype(destination), decltype(source)> convert; destination = convert(source); ``` ### Coord ```c++ template < int Rank, typename Index = int > class Coord; ``` `Coord<Rank, class T = int>` is a container used explicitly for defining logical coordinates in tensors of known rank. Traditional vector operators are defined such as `+`, `-`, and scalar multiplication `*` to simplify the creation of vector-valued expressions on tensor coordinates. **Example:** Vector operations on coordinates. ```c++ Coord<2> compute_offset(Coord<2> const & base) { Coord<2> stride = make_Coord(1, kM); return base + stride * make_Coord(threadIdx.x, threadIdx.y); } ``` Instances of `Coord<>` are used throughout CUTLASS to compute indices into tensors. Frequently, the dimensions of tensors of known layouts may be given names such as "rows" or "columns". To clarify the code, we have implemented several classes derived from `Coord<>` with accessors for each coordinate member. Such classes include: ```c++ struct MatrixCoord : public Coord<2> { Index & row(); Index & column(); }; ``` and ```c++ struct Tensor4DCoord : public Coord<4> { Index & n(); Index & h(); Index & w(); Index & c(); }; ``` ### PredicateVector<int Bits> `PredicateVector<int Bits>` contains a statically sized array of hardware predicates packed into registers to enable efficient access within unrolled loops. This container is optimized for sequential access through iterators, though these are only efficient when used within fully unrolled loops. Moreover, instances of `PredicateVector<>` are not guaranteed to be updated until any non-const iterator objects have gone out of scope. This is because iterators are effectively caches that update the `PredicateVector<>` instance's internal storage as a batch. **Example:** Managing an array of predicates. 
```c++ unsigned mask; PredicateVector<kBits> predicates; // Nested scope to update predicates via an iterator { auto pred_it = predicates.begin(); CUTLASS_PRAGMA_UNROLL for (int bit = 0; bit < kBits; ++bit, ++pred_it) { bool guard = (mask & (1u << bit)); pred_it.set(guard); } } // Efficient use of predicates to guard memory instructions T *ptr; Array<T, kAccesses> fragment; auto pred_it = predicates.const_begin(); for (int access = 0; access < kAccesses; ++access, ++pred_it) { if (*pred_it) { fragment[access] = ptr[access]; } } ``` Note: `PredicateVector<>` is not efficient when accessed via dynamic random access. If an array of bits is needed with dynamic random access (in contrast with access via _constexpr_ indices), then `Array<bin1_t, N>` should be used instead. ## Functional CUTLASS defines function objects corresponding to basic arithmetic operations modeled after C++ Standard Library's `<functional>` header. CUTLASS extends this by defining `multiply_add<T>` which computes `d = a * b + c`. The partial specialization `multiply_add<complex<T>>` computes complex-valued multiplication and addition using four real-valued multiply-add operations; these may correspond to native hardware instructions. Example: ```c++ complex<float> a; complex<float> b; complex<float> c; complex<float> d; multiply_add<complex<float>> mad_op; d = mad_op(a, b, c); // four single-precision multiply-add instructions ``` CUTLASS defines partial specializations for type `Array<T, N>`, performing elementwise operations on each element. A further partial specialization for `Array<half_t, N>` targets may target native SIMD instructions for compute capability SM60 and beyond. **Example:** Fused multiply-add of arrays of half-precision elements. ```c++ static int const kN = 8; Array<half_t, kN> a; Array<half_t, kN> b; Array<half_t, kN> c; Array<half_t, kN> d; multiply_add<Array<half_t, kN>> mad_op; d = mad_op(a, b, c); // efficient multiply-add for Array of half-precision elements ``` ## Numeric Conversion Operators are define to convert between numeric types in `numeric_conversion.h`. Conversion operators are defined in terms of individual numeric elements and on arrays which enable the possibility of efficient hardware support on current and future NVIDIA GPUs. **Example:** Converting between 32-b and 8-b integers. ```c++ ``` # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
cutlass/media/docs/fundamental_types.md/0
{ "file_path": "cutlass/media/docs/fundamental_types.md", "repo_id": "cutlass", "token_count": 4214 }
48
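The "Converting between 32-b and 8-b integers" example at the end of `fundamental_types.md` above is left empty. A plausible completion, modeled on the array-conversion snippet earlier in the same document, is sketched below; the exact converter template parameters and default rounding mode should be verified against `cutlass/numeric_conversion.h`.

```c++
#include "cutlass/array.h"
#include "cutlass/numeric_conversion.h"

void convert_s32_to_s8() {
  int const kN = 16;

  cutlass::Array<int, kN> source;
  cutlass::Array<int8_t, kN> destination;

  // Converts each 32-bit element and packs the results into 8-bit storage.
  cutlass::NumericArrayConverter<int8_t, int, kN> convert;

  destination = convert(source);
}
```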
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import logging import os import sys import cutlass_library def _cuda_install_path_from_nvcc() -> str: import subprocess # Attempt to detect CUDA_INSTALL_PATH based on location of NVCC result = subprocess.run(['/usr/bin/which', 'nvcc'], capture_output=True) if result.returncode != 0: raise Exception(f'Unable to find nvcc via `which` utility.') cuda_install_path = result.stdout.decode('utf-8').split('/bin/nvcc')[0] if not os.path.isdir(cuda_install_path): raise Exception(f'Environment variable "CUDA_INSTALL_PATH" is not defined, ' f'and default path of {cuda_install_path} does not exist.') return cuda_install_path CUTLASS_PATH = os.getenv("CUTLASS_PATH", cutlass_library.source_path) # Alias CUTLASS_PATH as source_path source_path = CUTLASS_PATH _CUDA_INSTALL_PATH = None def cuda_install_path(): """ Helper method for on-demand fetching of the CUDA installation path. This allows the import of CUTLASS to proceed even if NVCC is not available, preferring to raise this error only when an operation that needs NVCC is being performed. 
""" global _CUDA_INSTALL_PATH if _CUDA_INSTALL_PATH is None: _CUDA_INSTALL_PATH = os.getenv("CUDA_INSTALL_PATH", _cuda_install_path_from_nvcc()) return _CUDA_INSTALL_PATH CACHE_FILE = "compiled_cache.db" from cutlass_library import ( DataType, EpilogueScheduleType, KernelScheduleType, MathOperation, LayoutType, OpcodeClass, TileDescription, TileSchedulerType, ) this = sys.modules[__name__] this.logger = logging.getLogger(__name__) # RMM is only supported for Python 3.9+ if (sys.version_info.major == 3 and sys.version_info.major > 8) or sys.version_info.major > 3: try: import rmm this.use_rmm = True except ImportError: this.use_rmm = False else: this.use_rmm = False def set_log_level(level: int): """ Sets the log level :param log_level: severity of logging level to use. See https://docs.python.org/3/library/logging.html#logging-levels for options :type log_level: int """ this.logger.setLevel(level) set_log_level(logging.ERROR) from cutlass.library_defaults import OptionRegistry from cutlass.backend.utils.device import device_cc this._option_registry = None def get_option_registry(): """ Helper method for on-demand initialization of the options registry. This avoids building the registry when CUTLASS is imported. """ if this._option_registry is None: this.logger.info("Initializing option registry") this._option_registry = OptionRegistry(device_cc()) return this._option_registry this.__version__ = '3.5.0' from cutlass.backend import create_memory_pool from cutlass.emit.pytorch import pytorch from cutlass.op.gemm import Gemm from cutlass.op.conv import Conv2d, Conv2dFprop, Conv2dDgrad, Conv2dWgrad from cutlass.op.gemm_grouped import GroupedGemm from cutlass.op.op import OperationBase from cutlass.backend.evt.ir.tensor import Tensor this.memory_pool = None def get_memory_pool(): """" Helper method for on-demand memory pool. This avoids allocating the memory pool unnecessarily whe CUTLASS is imported. """ if this.use_rmm and this.memory_pool is None: this.memory_pool = create_memory_pool(init_pool_size=2 ** 30, max_pool_size=2 ** 32) return this.memory_pool from cuda import cuda, cudart this._device_id = None def initialize_cuda_context(): if this._device_id is not None: return if this.use_rmm: # This also covers initializing the CUDA context get_memory_pool() device_id = os.getenv("CUTLASS_CUDA_DEVICE_ID") if device_id is None: if not this.use_rmm: # Manually call cuInit() and create context by making a runtime API call err, = cudart.cudaFree(0) if err != cudart.cudaError_t.cudaSuccess: raise RuntimeError(f"cudaFree failed with error {err}") err, device_count = cuda.cuDeviceGetCount() if err != cuda.CUresult.CUDA_SUCCESS: raise Exception(f"cuDeviceGetCount failed with error {err}") if device_count <= 0: raise Exception("No CUDA devices found") device_id = 0 this._device_id = int(device_id) def device_id() -> int: initialize_cuda_context() return this._device_id
cutlass/python/cutlass/__init__.py/0
{ "file_path": "cutlass/python/cutlass/__init__.py", "repo_id": "cutlass", "token_count": 2173 }
49
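`initialize_cuda_context()` in `cutlass/__init__.py` above establishes a CUDA context with a no-op runtime call and then checks that at least one device exists. The C++ sketch below expresses roughly the same sequence against the CUDA runtime API, purely to illustrate the logic; the Python module performs it through the `cuda-python` bindings and additionally honors `CUTLASS_CUDA_DEVICE_ID` and RMM.

```c++
#include <cstdio>
#include <cuda_runtime.h>

// Create/initialize the primary CUDA context and check for devices,
// mirroring the logic of initialize_cuda_context() in cutlass/__init__.py.
int initialize_cuda_context() {
  // A no-op runtime call is enough to initialize the CUDA context.
  cudaError_t err = cudaFree(nullptr);
  if (err != cudaSuccess) {
    std::fprintf(stderr, "cudaFree failed: %s\n", cudaGetErrorString(err));
    return -1;
  }

  int device_count = 0;
  err = cudaGetDeviceCount(&device_count);
  if (err != cudaSuccess || device_count <= 0) {
    std::fprintf(stderr, "No CUDA devices found\n");
    return -1;
  }

  return 0;  // default to device 0, as the Python module does
}
```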
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Base class for Python EVT Frontend """ from typing import Union from cutlass_library import DataType from cutlass.backend.evt.ir import ( ComputeNode, DAGIR, LayoutNode, LoadNode, StoreNode, ) from cutlass.backend.evt.passes import ( EVTGraphDrawer, EVTPassManager, GetSmemSize, PassDAG2Tree, PassGetArgumentType, PassGetImpl, PassFixElementD, PassLayoutManipulateElimination, PassPreprocessRed, PassShapeTypePropagation, ) from cutlass.backend.utils import device_cc from cutlass.epilogue.evt_ops import permute, reshape from cutlass.utils.datatypes import library_type class EVTFrontendBase: layout_fns = { "permute": permute, "reshape": reshape } def __init__(self, element_compute=DataType.f32, cc=None, additional_passes=[], **kwargs) -> None: self.cc = cc if cc else device_cc() self.element_compute = library_type(element_compute) self.dag_ir = DAGIR(self.element_compute, self.cc) self.compute_cnt = 0 self.layout_cnt = 0 self.pass_manager = EVTPassManager( self.dag_ir, [ PassPreprocessRed, PassGetArgumentType, PassShapeTypePropagation, PassLayoutManipulateElimination, PassGetImpl, PassDAG2Tree, PassFixElementD ] + additional_passes) if self.cc == 80: self._epilogue_stages = 1 else: self._epilogue_stages = None @property def epilogue_stages(self): return self._epilogue_stages @epilogue_stages.setter def epilogue_stages(self, stages): self._epilogue_stages = stages def parse(self, *args, **kwargs): raise NotImplementedError(f"The 'parse' function must be overloaded in frontend class") def trace(self, *args, **kwargs): # Parse the input self.parse(*args, **kwargs) # Run the passes self.pass_manager() # Set the epilogue type self.epilogue_thread_type = self.dag_ir.epilogue_thread_type if self.cc == 90: self.arg_c_type = 
self.dag_ir.arg_c_type self.arg_d_type = self.dag_ir.arg_d_type self.reduction_names = self.dag_ir.reduction_names # # Helper functions for DAG IR manipulation # def add_node(self, node): self.dag_ir.add_node(node) def add_edge(self, src, tgt, weight=0): self.dag_ir.add_edge(src, tgt, weight=weight) def set_tensor(self, node_name, example): """ Add an example tensor to node {node_name} in the DAG IR """ meta = self.dag_ir.get_node_meta(node_name) meta.tensor = {"tensor": example} def set_store_tensor(self, node_name, example): """ Add an example tensor to node {node_name} in the DAG IR """ meta = self.dag_ir.get_node_meta(node_name) meta.store_tensor = {"tensor": example} def mark_output(self, node_name): """ Mark a store node as output """ meta = self.dag_ir.get_node_meta(node_name) if not isinstance(meta, StoreNode): raise ValueError( f"Only StoreNodes can be marked as output. " f"Got {type(meta).__name__}: {node_name}") meta.is_output = True # Add node with specific type def add_load_node(self, name, example): """ Add a Load node to DAG IR :param name: name of the loaded variable :type name: str :param example: example input :type example: np.ndarray|torch.Tensor|cupy.ndarray|float """ if name is None: raise ValueError(f"Name is not provided.") if example is None: raise ValueError(f"Example input for {name} is not provided.") load_node = LoadNode(name) load_node.tensor = {"tensor": example} # Special logics for accumulator if name == "accum": if load_node.tensor.rank == 2: new_shape = tuple([1, ] + list(load_node.tensor.shape)) load_node.tensor.broadcast(new_shape) elif load_node.tensor.rank < 2 or load_node.tensor.rank > 3: raise ValueError(f"Expect example inputs for 'accum' be a rank-2 or rank-3 tensor. Got {load_node.tensor.shape}.") self.add_node(load_node) def add_imm(self, value: Union[float,int]): """ Add an immediate scalar value to DAG IR :param value: the value of the immediate scalar :type value: float """ try: value = float(value) except: raise ValueError(f"{type(value).__name__} cannot be converted to float.") name = f"imm_{value}".replace('.', '_') load_node = LoadNode(name) load_node.tensor = {"tensor": value, "is_constant": True} self.add_node(load_node) return name def add_compute_node(self, op, name=None): """ Add a compute node. :param op: the computation op :param name: the node name (optional) :type name: str :return: the name of the compute node """ if name is None: name = f"compute_{self.compute_cnt}" self.compute_cnt += 1 compute_node = ComputeNode( name=name, fn=op, element_output=self.element_compute, element_compute=self.element_compute) self.add_node(compute_node) return compute_node.name def add_layout_node(self, op, kwargs, name=None): """ Add a layout node. :param op: the layout op :type op: evt_ops :param name: the node name (optional) :type name: str :return: the name of the layout node """ if name is None: name = f"layout_{self.layout_cnt}" self.layout_cnt += 1 layout_node = LayoutNode(name=name, fn=op, kwargs=kwargs) self.add_node(layout_node) return layout_node.name def add_store_node(self, name): store_node = StoreNode(name) self.add_node(store_node) # # Visualization The DAG IR # def visualize(self, name="dag_ir"): """ Visualize the dag ir with svg file :param name: the name of the graph """ drawer = EVTGraphDrawer(self.dag_ir, name) try: for name, graph in drawer.get_dot_graph(): graph.write_svg(f"./{name}.svg") except: raise RuntimeError( "'dot' is not found in path. GraphDrawer is disabled. 
" "Please install it with 'sudo apt-get install graphviz'." ) # # Get shared memory size # def get_smem_size(self, tile_description): """ Get the shared memory size of the epilogue """ smem_size = GetSmemSize(self.dag_ir)(tile_description) return smem_size
cutlass/python/cutlass/backend/evt/frontend/frontend_base.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/frontend/frontend_base.py", "repo_id": "cutlass", "token_count": 3787 }
50
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utility functions for interacting with the device """ from cuda import cuda, cudart import cutlass from cutlass.utils.datatypes import is_cupy_tensor, is_numpy_tensor, is_torch_tensor def check_cuda_errors(result: list): """ Checks whether `result` contains a CUDA error raises the error as an exception, if so. Otherwise, returns the result contained in the remaining fields of `result`. :param result: the results of the `cudart` method, consisting of an error code and any method results :type result: list :return: non-error-code results from the `results` parameter """ # `result` is of the format : (cudaError_t, result...) err = result[0] if err.value: raise RuntimeError("CUDA error: {}".format(cudart.cudaGetErrorName(err))) if len(result) == 1: return None elif len(result) == 2: return result[1] else: return result[1:] def device_cc(device: int = -1) -> int: """ Returns the compute capability of the device with ID `device`. :param device: ID of the device to query :type device: int :return: compute capability of the queried device (e.g., 80 for SM80) :rtype: int """ if device == -1: device = cutlass.device_id() deviceProp = check_cuda_errors(cudart.cudaGetDeviceProperties(device)) major = str(deviceProp.major) minor = str(deviceProp.minor) return int(major + minor) def device_sm_count(device: int = -1): if device == -1: device = cutlass.device_id() err, device_sm_count = cuda.cuDeviceGetAttribute( cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device ) if err != cuda.CUresult.CUDA_SUCCESS: raise Exception( "Failed to retireve SM count. 
" f"cuDeviceGetAttribute() failed with error: {cuda.cuGetErrorString(err)[1]}" ) return device_sm_count def to_device_ptr(tensor) -> cuda.CUdeviceptr: """ Converts a tensor to a CUdeviceptr :param tensor: tensor to convert :type tensor: np.ndarray | torch.Tensor | cp.ndarray | int :return: device pointer :rtype: cuda.CUdeviceptr """ if is_numpy_tensor(tensor): ptr = cuda.CUdeviceptr(tensor.__array_interface__["data"][0]) elif is_torch_tensor(tensor): ptr = cuda.CUdeviceptr(tensor.data_ptr()) elif is_cupy_tensor(tensor): ptr = cuda.CUdeviceptr(int(tensor.data.ptr)) elif isinstance(tensor, cuda.CUdeviceptr): ptr = tensor elif isinstance(tensor, int): ptr = cuda.CUdeviceptr(tensor) else: raise NotImplementedError(tensor) return ptr
cutlass/python/cutlass/backend/utils/device.py/0
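A minimal usage sketch of the helpers above (editor-added illustration, not part of the original file). The import path is inferred from the file location, the calls assume a CUDA-capable device is visible, and the values in comments are illustrative only.

# Editor-added sketch: exercising the device-query helpers defined above.
import cutlass
from cuda import cuda, cudart
from cutlass.backend.utils.device import check_cuda_errors, device_cc, device_sm_count, to_device_ptr

cc = device_cc()            # e.g., 80 on an SM80-class GPU
sms = device_sm_count()     # number of SMs on the active device

# check_cuda_errors unpacks the (error_code, value) tuples returned by cuda-python,
# raising if the error code is non-zero and returning the remaining value otherwise.
props = check_cuda_errors(cudart.cudaGetDeviceProperties(cutlass.device_id()))

# Plain integer addresses (here a placeholder value of 0) are wrapped directly.
ptr = to_device_ptr(0)
assert isinstance(ptr, cuda.CUdeviceptr)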
{ "file_path": "cutlass/python/cutlass/backend/utils/device.py", "repo_id": "cutlass", "token_count": 1519 }
51
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################

"""
Utility functions for checking constraints on kernels and calculating kernel attributes
"""

import ctypes

from cutlass_library import DataTypeSize, OperationKind, SharedMemPerCC

import cutlass
from cutlass.backend.library import TileDescription


def calculate_smem_usage_per_stage(td: TileDescription, operation_kind: OperationKind) -> int:
    """
    Returns the amount of shared memory in bytes consumed in a single stage of a kernel.

    :param td: tile description to compute shared memory of
    :type td: TileDescription
    :param operation_kind: identifier for the type of operation being performed
    :type operation_kind: cutlass_library.OperationKind

    :return: number of bytes of shared memory consumed by a single stage
    :rtype: int
    """
    m, n, k = td.threadblock_shape

    if operation_kind == OperationKind.Gemm:
        stage_barrier_bytes = 32
        return (
            (DataTypeSize[td.math_instruction.element_a] * m * k // 8)
            + (DataTypeSize[td.math_instruction.element_b] * k * n // 8)
            + stage_barrier_bytes
        )
    else:
        raise Exception(f"No available shared memory calculation for operation kind {operation_kind}")


def calculate_smem_usage(operation) -> int:
    """
    Returns the amount of shared memory in bytes consumed by a kernel.
:return: number of bytes of shared memory consumed by the operation :return: int """ _per_stage = calculate_smem_usage_per_stage(operation.tile_description, operation.operation_kind) return _per_stage * operation.tile_description.stages def valid_stage_count( cc: int, kernel_cc: int, td: TileDescription, element_C: cutlass.DataType = None, element_D: cutlass.DataType = None, verbose: bool = True) -> tuple: """ Checks whether a device with `cc` supports the number of stages within `tile_description`, both based on raw limits on the number of stages and based on shared memory capacity :param cc: compute capability of device in question :type cc: int :param kernel_cc: compute capability that the kernel targets (corresponding to the arch::SMxy tag in CUTLASS) :type kernel_cc: int :param td: tile description to check :type td: TileDescription :param element_C: data type of operand C :type element_C: cutlass.DataType :param element_D: data type of operand D :type element_D: cutlass.DataType :param verbose: whether to log warnings :type verbose: bool :return: tuple with the first element indicating whether the provided tile description is valid for the provided device and the second element being an error message :rtype: tuple """ if kernel_cc == 90: if (td.stages is None or td.stages == 0): # Stage count of None or 0 for SM90 indicates that the CollectiveBuilder automatically # determines the stage count to use. Thus, all settings are valid in these scenarios. return (True, "") elif verbose: cutlass.logger.warning( "Setting an explicit stage count for SM90 kernels currently may " "result in compilation errors if the combination of tile shape, " "stage count, and shared memory requirement of the epilogue exceeds " "the available shared memory per SM.") if td.stages <= 0: return (False, f"Stage counts must be positive integers. Tile description has stage count of {td.stages}.") if cc < 80 and td.stages != 2: return (False, f"Tile description has stage count of {td.stages}, " f"but only 2 stages are supported on SM{cc}.") # The calculation below does not consider shared memory used by the epilogue and, thus, # only catches cases in which the mainloop exceeds the device's shared memory capacity. # This is not a concern for CUTLASS 2.x kernels, for which the shared memory of the # mainloop and epilogue is shared. smem_per_stage = calculate_smem_usage_per_stage(td, OperationKind.Gemm) smem_usage_mainloop = (smem_per_stage * td.stages) smem_arch = SharedMemPerCC[cc] << 10 if smem_usage_mainloop > smem_arch: return ( False, "Configuration uses too much shared memory. Consider reducing stage count or tile shape.\n" f"Details:\n" f"Mainloop uses {smem_per_stage} bytes of shared memory per stage, and " f"{td.stages} stages for a total of {smem_usage_mainloop} bytes.\n" f"The maxmium amount of shared memory that can be used per block on CC {cc} is {smem_arch}.") return (True, "") def valid_cluster_shape(cc: int, cluster_shape: list) -> tuple: """ Checks whether a device with `cc` supports a thread block cluster of shape `cluster_shape`. :param cc: compute capability of device in question :type cc: int :param cluster_shape: dimensions of thread block cluster shape to check :type cluster_shape: list :return: tuple with the first element indicating whether the provided cluster shape is valid for the provided device and the second element being an error message :rtype: tuple """ if cc < 90: if cluster_shape != [1, 1, 1]: return (False, f"Cluster shape for pre-SM90 architectures must be [1, 1, 1]. 
Received cluster shape of " f"{cluster_shape} for SM{cc}.") else: return (True, "") if len(cluster_shape) != 3: return (False, f"Cluster shapes must be rank-3. Received {cluster_shape} (rank {len(cluster_shape)}") if cluster_shape[2] != 1: return (False, "CUTLASS kernels currently require the third dimension of cluster shape to be 1. " f"Received cluster shape of {cluster_shape}.") # The CUDA programming guide currently defines a maximum of 8 thread blocks per cluster # as being portably supported (https://docs.nvidia.com/cuda/cuda-c-programming-guide/#thread-block-clusters). # Current CUTLASS kernels only have non-unit cluster dimensions within the first two dimensions, # so we check that the first two dimensions of the cluster shape do not exceed 8 thread blocks in total. blocks_in_2d = cluster_shape[0] * cluster_shape[1] if blocks_in_2d > 8: return (False, f"Thread block clusters with more than 8 thread blocks are currently unsupported on SM{cc}. " f"Received cluster shape {cluster_shape}, which has {blocks_in_2d} thread blocks.") return (True, "") def valid_schedule( cc: int, kernel_schedule: cutlass.KernelScheduleType, epilogue_schedule: cutlass.EpilogueScheduleType, tile_scheduler: cutlass.TileSchedulerType) -> tuple: """ Checks that the kernel and epilogue schedules passed in are a valid combination for a device of compute capability ``cc``. :param cc: compute capability of device in question :type cc: int :param kernel_schedule: kernel schedule type :type kernel_schedule: cutlass.KernelScheduleType :param epilogue_schedule: epilogue schedule type :type epilogue_schedule: cutlass.EpilogueScheduleType :param tile_scheduler: tile scheduler type :type tile_scheduler: cutlass.TileSchedulerType :return: tuple with the first element indicating whether the provided schedules are valid for the provided device and the second element being an error message :rtype: tuple """ kernel_auto = (kernel_schedule == cutlass.KernelScheduleType.ScheduleAuto) epilogue_auto = (epilogue_schedule == cutlass.EpilogueScheduleType.ScheduleAuto) tile_scheduler_default = (tile_scheduler == cutlass.TileSchedulerType.Default) if cc < 90 and not (kernel_auto and epilogue_auto and tile_scheduler_default): return (False, "Non-default schedules are only supported on SM90 and beyond") if (kernel_auto and not epilogue_auto) or (not kernel_auto and epilogue_auto): return (False, "Kernel and epilogue schedules must either both be auto or neither be auto") if not tile_scheduler_default: cooperative_kernels = [cutlass.KernelScheduleType.TmaWarpSpecializedCooperative, cutlass.KernelScheduleType.CpAsyncWarpSpecializedCooperative] if (tile_scheduler == cutlass.TileSchedulerType.StreamK) and (kernel_schedule not in cooperative_kernels): return (False, "Stream-K tile scheduler is currently only supported with the cooperative kernel schedule") return (True, "") def alignment_or_default(alignment_provided: int, default_alignment: int) -> int: """ Returns `alignment_provided` if it is set, otherwise `default_alignment` and checks that `alignment_provided` does not exceed `default_alignment`. :param alignment_provided: alignment preference specified. Can be None. 
    :type alignment_provided: int
    :param default_alignment: alignment to use if `alignment_provided` is None
    :type default_alignment: int

    :return: alignment to use
    :rtype: int
    """
    if alignment_provided is not None:
        if alignment_provided > default_alignment:
            raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.")
        return alignment_provided

    return default_alignment


def update_alignment(alignment_provided: int, default_alignment: int) -> int:
    """
    Returns `alignment_provided` if it is set and does not exceed `default_alignment`. If
    `alignment_provided` exceeds `default_alignment` but is an exact multiple of it,
    `default_alignment` is returned; otherwise an exception is raised. If `alignment_provided`
    is None, `default_alignment` is returned.

    :param alignment_provided: alignment preference specified. Can be None.
    :type alignment_provided: int
    :param default_alignment: alignment to use if `alignment_provided` is None
    :type default_alignment: int

    :return: alignment to use
    :rtype: int
    """
    if alignment_provided is not None:
        if alignment_provided > default_alignment:
            if alignment_provided % default_alignment == 0:
                return default_alignment
            raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.")
        return alignment_provided

    return default_alignment
cutlass/python/cutlass/utils/check.py/0
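A short sketch of how the validity helpers above report results (editor-added, not part of the original file). The compute capabilities and shapes are illustrative values, and the import path is inferred from the file location.

# Editor-added sketch: each validity helper returns a (bool, error_message) tuple.
from cutlass.utils.check import alignment_or_default, valid_cluster_shape

ok, msg = valid_cluster_shape(90, [2, 1, 1])   # SM90 supports small 2D thread block clusters
assert ok and msg == ""

ok, msg = valid_cluster_shape(80, [2, 1, 1])   # pre-SM90 devices require a [1, 1, 1] cluster
assert not ok

# Falls back to the default when no preference is given; rejects oversized requests.
assert alignment_or_default(None, 8) == 8
assert alignment_or_default(4, 8) == 4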
{ "file_path": "cutlass/python/cutlass/utils/check.py", "repo_id": "cutlass", "token_count": 4035 }
52
var DOCUMENTATION_OPTIONS = { URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), VERSION: '3.1.0', LANGUAGE: 'en', COLLAPSE_INDEX: false, BUILDER: 'html', FILE_SUFFIX: '.html', LINK_SUFFIX: '.html', HAS_SOURCE: true, SOURCELINK_SUFFIX: '.txt', NAVIGATION_WITH_KEYS: false, SHOW_SEARCH_SUMMARY: true, ENABLE_SEARCH_SHORTCUTS: true, };
cutlass/python/docs/_static/documentation_options.js/0
{ "file_path": "cutlass/python/docs/_static/documentation_options.js", "repo_id": "cutlass", "token_count": 196 }
53
/* * searchtools.js * ~~~~~~~~~~~~~~~~ * * Sphinx JavaScript utilities for the full-text search. * * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ "use strict"; /** * Simple result scoring code. */ if (typeof Scorer === "undefined") { var Scorer = { // Implement the following function to further tweak the score for each result // The function takes a result array [docname, title, anchor, descr, score, filename] // and returns the new score. /* score: result => { const [docname, title, anchor, descr, score, filename] = result return score }, */ // query matches the full name of an object objNameMatch: 11, // or matches in the last dotted part of the object name objPartialMatch: 6, // Additive scores depending on the priority of the object objPrio: { 0: 15, // used to be importantResults 1: 5, // used to be objectResults 2: -5, // used to be unimportantResults }, // Used when the priority is not in the mapping. objPrioDefault: 0, // query found in title title: 15, partialTitle: 7, // query found in terms term: 5, partialTerm: 2, }; } const _removeChildren = (element) => { while (element && element.lastChild) element.removeChild(element.lastChild); }; /** * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping */ const _escapeRegExp = (string) => string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string const _displayItem = (item, searchTerms) => { const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; const [docName, title, anchor, descr, score, _filename] = item; let listItem = document.createElement("li"); let requestUrl; let linkUrl; if (docBuilder === "dirhtml") { // dirhtml builder let dirname = docName + "/"; if (dirname.match(/\/index\/$/)) dirname = dirname.substring(0, dirname.length - 6); else if (dirname === "index/") dirname = ""; requestUrl = docUrlRoot + dirname; linkUrl = requestUrl; } else { // normal html builders requestUrl = docUrlRoot + docName + docFileSuffix; linkUrl = docName + docLinkSuffix; } let linkEl = listItem.appendChild(document.createElement("a")); linkEl.href = linkUrl + anchor; linkEl.dataset.score = score; linkEl.innerHTML = title; if (descr) listItem.appendChild(document.createElement("span")).innerHTML = " (" + descr + ")"; else if (showSearchSummary) fetch(requestUrl) .then((responseData) => responseData.text()) .then((data) => { if (data) listItem.appendChild( Search.makeSearchSummary(data, searchTerms) ); }); Search.output.appendChild(listItem); }; const _finishSearch = (resultCount) => { Search.stopPulse(); Search.title.innerText = _("Search Results"); if (!resultCount) Search.status.innerText = Documentation.gettext( "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." 
); else Search.status.innerText = _( `Search finished, found ${resultCount} page(s) matching the search query.` ); }; const _displayNextItem = ( results, resultCount, searchTerms ) => { // results left, load the summary and display it // this is intended to be dynamic (don't sub resultsCount) if (results.length) { _displayItem(results.pop(), searchTerms); setTimeout( () => _displayNextItem(results, resultCount, searchTerms), 5 ); } // search finished, update title and status message else _finishSearch(resultCount); }; /** * Default splitQuery function. Can be overridden in ``sphinx.search`` with a * custom function per language. * * The regular expression works by splitting the string on consecutive characters * that are not Unicode letters, numbers, underscores, or emoji characters. * This is the same as ``\W+`` in Python, preserving the surrogate pair area. */ if (typeof splitQuery === "undefined") { var splitQuery = (query) => query .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) .filter(term => term) // remove remaining empty strings } /** * Search Module */ const Search = { _index: null, _queued_query: null, _pulse_status: -1, htmlToText: (htmlString) => { const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); const docContent = htmlElement.querySelector('[role="main"]'); if (docContent !== undefined) return docContent.textContent; console.warn( "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." ); return ""; }, init: () => { const query = new URLSearchParams(window.location.search).get("q"); document .querySelectorAll('input[name="q"]') .forEach((el) => (el.value = query)); if (query) Search.performSearch(query); }, loadIndex: (url) => (document.body.appendChild(document.createElement("script")).src = url), setIndex: (index) => { Search._index = index; if (Search._queued_query !== null) { const query = Search._queued_query; Search._queued_query = null; Search.query(query); } }, hasIndex: () => Search._index !== null, deferQuery: (query) => (Search._queued_query = query), stopPulse: () => (Search._pulse_status = -1), startPulse: () => { if (Search._pulse_status >= 0) return; const pulse = () => { Search._pulse_status = (Search._pulse_status + 1) % 4; Search.dots.innerText = ".".repeat(Search._pulse_status); if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); }; pulse(); }, /** * perform a search for something (or wait until index is loaded) */ performSearch: (query) => { // create the required interface elements const searchText = document.createElement("h2"); searchText.textContent = _("Searching"); const searchSummary = document.createElement("p"); searchSummary.classList.add("search-summary"); searchSummary.innerText = ""; const searchList = document.createElement("ul"); searchList.classList.add("search"); const out = document.getElementById("search-results"); Search.title = out.appendChild(searchText); Search.dots = Search.title.appendChild(document.createElement("span")); Search.status = out.appendChild(searchSummary); Search.output = out.appendChild(searchList); const searchProgress = document.getElementById("search-progress"); // Some themes don't use the search progress node if (searchProgress) { searchProgress.innerText = _("Preparing search..."); } Search.startPulse(); // index already loaded, the browser was quick! 
if (Search.hasIndex()) Search.query(query); else Search.deferQuery(query); }, /** * execute search (requires search index to be loaded) */ query: (query) => { const filenames = Search._index.filenames; const docNames = Search._index.docnames; const titles = Search._index.titles; const allTitles = Search._index.alltitles; const indexEntries = Search._index.indexentries; // stem the search terms and add them to the correct list const stemmer = new Stemmer(); const searchTerms = new Set(); const excludedTerms = new Set(); const highlightTerms = new Set(); const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); splitQuery(query.trim()).forEach((queryTerm) => { const queryTermLower = queryTerm.toLowerCase(); // maybe skip this "word" // stopwords array is from language_data.js if ( stopwords.indexOf(queryTermLower) !== -1 || queryTerm.match(/^\d+$/) ) return; // stem the word let word = stemmer.stemWord(queryTermLower); // select the correct list if (word[0] === "-") excludedTerms.add(word.substr(1)); else { searchTerms.add(word); highlightTerms.add(queryTermLower); } }); if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) } // console.debug("SEARCH: searching for:"); // console.info("required: ", [...searchTerms]); // console.info("excluded: ", [...excludedTerms]); // array of [docname, title, anchor, descr, score, filename] let results = []; _removeChildren(document.getElementById("search-progress")); const queryLower = query.toLowerCase(); for (const [title, foundTitles] of Object.entries(allTitles)) { if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { for (const [file, id] of foundTitles) { let score = Math.round(100 * queryLower.length / title.length) results.push([ docNames[file], titles[file] !== title ? `${titles[file]} > ${title}` : title, id !== null ? "#" + id : "", null, score, filenames[file], ]); } } } // search for explicit entries in index directives for (const [entry, foundEntries] of Object.entries(indexEntries)) { if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { for (const [file, id] of foundEntries) { let score = Math.round(100 * queryLower.length / entry.length) results.push([ docNames[file], titles[file], id ? "#" + id : "", null, score, filenames[file], ]); } } } // lookup as object objectTerms.forEach((term) => results.push(...Search.performObjectSearch(term, objectTerms)) ); // lookup as search terms in fulltext results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); // let the scorer override scores with a custom scoring function if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); // now sort the results by score (in opposite order of appearance, since the // display function below uses pop() to retrieve items) and then // alphabetically results.sort((a, b) => { const leftScore = a[4]; const rightScore = b[4]; if (leftScore === rightScore) { // same score: sort alphabetically const leftTitle = a[1].toLowerCase(); const rightTitle = b[1].toLowerCase(); if (leftTitle === rightTitle) return 0; return leftTitle > rightTitle ? -1 : 1; // inverted is intentional } return leftScore > rightScore ? 
1 : -1; }); // remove duplicate search results // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept let seen = new Set(); results = results.reverse().reduce((acc, result) => { let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); if (!seen.has(resultStr)) { acc.push(result); seen.add(resultStr); } return acc; }, []); results = results.reverse(); // for debugging //Search.lastresults = results.slice(); // a copy // console.info("search results:", Search.lastresults); // print the results _displayNextItem(results, results.length, searchTerms); }, /** * search for object names */ performObjectSearch: (object, objectTerms) => { const filenames = Search._index.filenames; const docNames = Search._index.docnames; const objects = Search._index.objects; const objNames = Search._index.objnames; const titles = Search._index.titles; const results = []; const objectSearchCallback = (prefix, match) => { const name = match[4] const fullname = (prefix ? prefix + "." : "") + name; const fullnameLower = fullname.toLowerCase(); if (fullnameLower.indexOf(object) < 0) return; let score = 0; const parts = fullnameLower.split("."); // check for different match types: exact matches of full name or // "last name" (i.e. last dotted part) if (fullnameLower === object || parts.slice(-1)[0] === object) score += Scorer.objNameMatch; else if (parts.slice(-1)[0].indexOf(object) > -1) score += Scorer.objPartialMatch; // matches in last name const objName = objNames[match[1]][2]; const title = titles[match[0]]; // If more than one term searched for, we require other words to be // found in the name/title/description const otherTerms = new Set(objectTerms); otherTerms.delete(object); if (otherTerms.size > 0) { const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); if ( [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) ) return; } let anchor = match[3]; if (anchor === "") anchor = fullname; else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; const descr = objName + _(", in ") + title; // add custom score for some objects according to scorer if (Scorer.objPrio.hasOwnProperty(match[2])) score += Scorer.objPrio[match[2]]; else score += Scorer.objPrioDefault; results.push([ docNames[match[0]], fullname, "#" + anchor, descr, score, filenames[match[0]], ]); }; Object.keys(objects).forEach((prefix) => objects[prefix].forEach((array) => objectSearchCallback(prefix, array) ) ); return results; }, /** * search for full-text terms in the index */ performTermsSearch: (searchTerms, excludedTerms) => { // prepare search const terms = Search._index.terms; const titleTerms = Search._index.titleterms; const filenames = Search._index.filenames; const docNames = Search._index.docnames; const titles = Search._index.titles; const scoreMap = new Map(); const fileMap = new Map(); // perform the search on the required terms searchTerms.forEach((word) => { const files = []; const arr = [ { files: terms[word], score: Scorer.term }, { files: titleTerms[word], score: Scorer.title }, ]; // add support for partial matches if (word.length > 2) { const escapedWord = _escapeRegExp(word); Object.keys(terms).forEach((term) => { if (term.match(escapedWord) && !terms[word]) arr.push({ files: terms[term], score: Scorer.partialTerm }); }); Object.keys(titleTerms).forEach((term) => { if (term.match(escapedWord) && !titleTerms[word]) arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); }); } // no match but word 
was a required one if (arr.every((record) => record.files === undefined)) return; // found search word in contents arr.forEach((record) => { if (record.files === undefined) return; let recordFiles = record.files; if (recordFiles.length === undefined) recordFiles = [recordFiles]; files.push(...recordFiles); // set score for the word in each file recordFiles.forEach((file) => { if (!scoreMap.has(file)) scoreMap.set(file, {}); scoreMap.get(file)[word] = record.score; }); }); // create the mapping files.forEach((file) => { if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word); else fileMap.set(file, [word]); }); }); // now check if the files don't contain excluded terms const results = []; for (const [file, wordList] of fileMap) { // check if all requirements are matched // as search terms with length < 3 are discarded const filteredTermCount = [...searchTerms].filter( (term) => term.length > 2 ).length; if ( wordList.length !== searchTerms.size && wordList.length !== filteredTermCount ) continue; // ensure that none of the excluded terms is in the search result if ( [...excludedTerms].some( (term) => terms[term] === file || titleTerms[term] === file || (terms[term] || []).includes(file) || (titleTerms[term] || []).includes(file) ) ) break; // select one (max) score for the file. const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); // add result to the result list results.push([ docNames[file], titles[file], "", null, score, filenames[file], ]); } return results; }, /** * helper function to return a node containing the * search summary for a given text. keywords is a list * of stemmed words. */ makeSearchSummary: (htmlText, keywords) => { const text = Search.htmlToText(htmlText); if (text === "") return null; const textLower = text.toLowerCase(); const actualStartPosition = [...keywords] .map((k) => textLower.indexOf(k.toLowerCase())) .filter((i) => i > -1) .slice(-1)[0]; const startWithContext = Math.max(actualStartPosition - 120, 0); const top = startWithContext === 0 ? "" : "..."; const tail = startWithContext + 240 < text.length ? "..." : ""; let summary = document.createElement("p"); summary.classList.add("context"); summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; return summary; }, }; _ready(Search.init);
cutlass/python/docs/_static/searchtools.js/0
{ "file_path": "cutlass/python/docs/_static/searchtools.js", "repo_id": "cutlass", "token_count": 6918 }
54
<jupyter_start><jupyter_text>Basic example of using the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs.[](https://colab.research.google.com/github/NVIDIA/cutlass/tree/master/examples/00_basic_gemm.ipynb) We first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import numpy as np import random import cutlass # This controls whether ther C++ GEMM declaration will be printed at each step. Set to `false` to # omit this information. print_module = True m = 128 n = m k = m dtype = np.float16 type_A = np.float16 type_B = np.float16 type_C = np.float16 type_D = np.float16 np.random.seed(1234) random.seed(1234) scope_min = -4 scope_max = 4 tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A)) tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B)) tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C)) alpha = np.float16(1.) beta = np.float16(0.) tensor_D = np.zeros(tensor_C.shape).astype(type_D)<jupyter_output>/usr/local/lib/python3.8/dist-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm<jupyter_text>Declaring and running a GEMMTo get started, one only needs to provide the tensors declared above to the `cutlass.op.Gemm` call.This sets up a default GEMM operation for the given device on which you are running.Assuming that we are running on SM80, this default to using a GEMM that leverages FP16 Tensor Core operations.Calling `plan.run()` will generate the CUTLASS C++ kernel in question, compile it, and run it on the tensors we previously passed in. By setting `print_module` to `true`, the C++ code that is emitted is printed.<jupyter_code># We specify `element_accumulator` here so as to match the kernel run by NumPy below. However, # specifying `element_accumulator` is not required if it is the same as `element` plan = cutlass.Gemm(element=dtype, layout=cutlass.LayoutType.RowMajor, element_accumulator=np.float32) plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8 using cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8_base = typename cutlass::gemm::kernel::DefaultGemmUniversal< cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, cutlass::half_t, cutlass::layout::RowMajor, float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<256, 128, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>, 3, cutlass::arch::OpMultiplyAdd >::GemmKernel; // Define named type struct cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8_type : public cutlass_sm80_tensorop_f16_s1[...]<jupyter_text>There are many other ways to construct a plan from `cutlass.op.Gemm` (e.g., by specifiying they types and layouts of each operand, by providing representative tensors as inputs). 
For more details on these, see the documentation in the `cutlass.op.Gemm` constructor. We then compare the output to running the GEMM using NumPy.<jupyter_code>tensor_D_numpy = (alpha * (tensor_A @ tensor_B)) + (beta * tensor_C) np.testing.assert_array_equal(tensor_D, tensor_D_numpy)<jupyter_output><empty_output><jupyter_text>Note that one could use the same kernel just declared for tensors provided by other frameworks beyond NumPy, such as PyTorch or CuPy. Changing operation modesBy default, the CUTLASS Python interface will try to use Tensor Core operations whenever possible. If the configuration provided to `cutlass.op.Gemm` is not supported on Tensor Cores, the interface will fall back to using a SIMT kernel.The operation mode currently in use can be returned via the `plan.opclass` property. In this case Tensor Core operations.<jupyter_code>print(plan.opclass)<jupyter_output>OpcodeClass.TensorOp<jupyter_text>Suppose that we don't want to use Tensor Cores for this GEMM. One can change to using CUTLASS's SIMT GEMMs by setting the plan's `opclass` field.As is shown in the printed output, the emitted kernel uses template parameters that fit CUTLASS's SIMT GEMMs.Also notice that, this time around, we provided tensor parameters to `plan.run()`. One is free to provide different parameters to `plan.run()` than were passed in at the initial call to `cutlass.op.Gemm`, provided that the passed-in tensors have the same data type and layout as those passed in on intialization.<jupyter_code>tensor_D_simt = np.zeros(tensor_C.shape).astype(type_D) plan.opclass = cutlass.OpcodeClass.Simt plan.run(tensor_A, tensor_B, tensor_C, tensor_D_simt, alpha, beta, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_simt_f16_sgemm_f16_1x1x1_128x128_8x2_tt_align1 using cutlass_sm80_simt_f16_sgemm_f16_1x1x1_128x128_8x2_tt_align1_base = typename cutlass::gemm::kernel::DefaultGemmUniversal< cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 1, cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 1, cutlass::half_t, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 128, 8>, cutlass::gemm::GemmShape<32, 64, 8>, cutlass::gemm::GemmShape<1, 1, 1>, cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 1, float, float>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>, 2, cutlass::arch::OpMultiplyAdd >::GemmKernel; // Define named type struct cutlass_sm80_simt_f16_sgemm_f16_1x1x1_128x128_8x2_tt_align1_type : public cutlass_sm80_simt_f16_sgemm_f16_1x1x1_128x128_8x2_tt_align1_base { };<jupyter_text>If we compare the output of the Tensor Core and SIMT GEMMs we just ran we see that they are equal.<jupyter_code>np.testing.assert_array_equal(tensor_D, tensor_D_simt)<jupyter_output><empty_output><jupyter_text>Running cached kernelsYou may have noticed that the `plan.run()` calls for the previous two kernels took some time to execute. This is because the kernel being emitted had not yet been compiled.CUTLASS caches compiled binaries so that recompilation isn't necessary every time a kernel is run. 
For example, if we change modes back to using Tensor Cores and call `plan.run()` again (with a different set of tensor parameters), you'll find the call to return much faster.<jupyter_code>m = 2400 n = 3232 k = 4096 tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A)) tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B)) tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C)) tensor_D = np.zeros(tensor_C.shape).astype(type_D) alpha = np.float16(1.) beta = np.float16(2.) plan.opclass = cutlass.OpcodeClass.TensorOp plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8 using cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8_base = typename cutlass::gemm::kernel::DefaultGemmUniversal< cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, cutlass::half_t, cutlass::layout::RowMajor, float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<256, 128, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>, 3, cutlass::arch::OpMultiplyAdd >::GemmKernel; // Define named type struct cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8_type : public cutlass_sm80_tensorop_f16_s1[...]<jupyter_text>Running non-default GEMMsThe previous examples showed how it is simple to get started running a default GEMM kernel in CUTLASS. But, what do you do if you want a bit more control over the parameters to the GEMM?Under the hood, CUTLASS enumerates the different GEMM configuration parameters possible for this kernel from the CUTLASS profiler. 
The code below shows how one can access the tile descriptions for the kernels (e.g., cluster, threadblock, and warp shape).<jupyter_code>tiles = plan.tile_descriptions() print('{} tile descriptions returned'.format(len(tiles))) num_print = 10 print('First {} tile descriptions are:'.format(num_print)) for td in tiles[:num_print]: print(td)<jupyter_output>132 tile descriptions returned First 10 tile descriptions are: { ClusterShape: [1, 1, 1] ThreadblockShape: [256, 128, 64] WarpCount: [4, 2, 1] Stages: 3 Kernel schedule: ScheduleAuto } { ClusterShape: [1, 1, 1] ThreadblockShape: [128, 256, 64] WarpCount: [2, 4, 1] Stages: 3 Kernel schedule: ScheduleAuto } { ClusterShape: [1, 1, 1] ThreadblockShape: [256, 128, 64] WarpCount: [4, 2, 1] Stages: 3 Kernel schedule: ScheduleAuto } { ClusterShape: [1, 1, 1] ThreadblockShape: [128, 256, 64] WarpCount: [2, 4, 1] Stages: 3 Kernel schedule: ScheduleAuto } { ClusterShape: [1, 1, 1] ThreadblockShape: [256, 128, 32] WarpCount: [4, 2, 1] Stages: 3 Kernel schedule: ScheduleAuto } { ClusterShape: [1, 1, 1] ThreadblockShape: [128, 256, 32] WarpCount: [2, 4, 1] Stages: 3 Kernel schedule: ScheduleAuto } { ClusterShape: [1, 1, 1] ThreadblockShape: [256, 64, 64] WarpCount: [4, 1, 1] Stages: 4 Kernel schedule: ScheduleAuto } { Cl[...]<jupyter_text>Next, we'll pick one of these configurations at random and compile and run it.<jupyter_code>idx = random.randint(0, len(tiles)-1) td = tiles[idx] print('Tile description {} is: {}'.format(idx, td)) plan.compile(td) plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output>Tile description 112 is: { ClusterShape: [1, 1, 1] ThreadblockShape: [128, 128, 32] WarpCount: [2, 2, 1] Stages: 4 Kernel schedule: ScheduleAuto }<jupyter_text>One can also change the swizzling function used by the kernel. For example, one can modify the kernel to use the stream K feature of CUTLASS via:<jupyter_code># Stream K is only supported pre-SM90 (at least when this example was written) if plan.cc != 90: plan.swizzling_functor = cutlass.swizzle.ThreadblockSwizzleStreamK plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_128x128_32x4_tt_align8 using cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_128x128_32x4_tt_align8_base = typename cutlass::gemm::kernel::DefaultGemmUniversal< cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, cutlass::half_t, cutlass::layout::RowMajor, float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 128, 32>, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>, cutlass::gemm::threadblock::ThreadblockSwizzleStreamK, 4, cutlass::arch::OpMultiplyAdd >::GemmKernel; // Define named type struct cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_128x128_32x4_tt_align8_type : public cutlass_sm80_tensorop_f16_s16x8x16ge[...]<jupyter_text>Handling errorsThe CUTLASS Python interface attempts to catch runtime and compilation errors in Python so as to provide more understandable error messages.Here's an example in which we try to use too many stages for a given GEMM kernel. Normally, this would result in a runtime error due to the GPU having insufficient shared memory to launch the kernel with 8 stages. 
The CUTLASS Python interface is able to detect this issue before compiling the kernel, and reports it back to the user.<jupyter_code># td = tiles[0] # td.stages = 8 # plan.compile(td)<jupyter_output><empty_output>
cutlass/python/docs/externals/00_basic_gemm.ipynb/0
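A small sketch of the error-handling behavior described at the end of the notebook above (editor-added, not part of the notebook). The stage count and the exception type are illustrative; the point is only that an infeasible configuration is expected to be reported before compilation.

# Editor-added sketch: requesting an excessive stage count should be rejected early.
import numpy as np
import cutlass

plan = cutlass.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
td = plan.tile_descriptions()[0]
td.stages = 8    # likely exceeds the shared memory available per SM

try:
    plan.compile(td)
except Exception as e:
    # The interface detects the infeasible configuration before building the kernel.
    print(f"Configuration rejected: {e}")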
{ "file_path": "cutlass/python/docs/externals/00_basic_gemm.ipynb", "repo_id": "cutlass", "token_count": 4739 }
55
################################################################################ # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ Unit test for store nodes in SM90 """ import logging import unittest import cutlass from cutlass.backend import * from cutlass.epilogue import * from utils.evt_testbed import EVTTestBed, EVTTestCaseBase cutlass.set_log_level(logging.WARNING) @unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]") class TestEVTLayout(EVTTestCaseBase): def test_permute_1(self): """ Returning a tensor with shape [m, n] """ def evt_permute(accum, alpha, C): F = alpha * accum F_permute = permute(F, indices=(0, 2, 1)) D_permute = F_permute + permute(C, indices=(0, 2, 1)) D = permute(D_permute, indices=(0, 2, 1)) return D, F for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "alpha": 0.5, "C": self.fake_tensor(self.element, (l, m, n)), "F": self.fake_tensor(self.element, (l, m, n)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_permute, example_inputs) input_keys = ["C", "alpha"] result_keys = ["D", "F"] launcher.verify((m, n, k), input_keys, result_keys, l) @unittest.skipIf(device_cc() != 90, "This unittest is for cc = Sm90 only") def test_permute_2(self): """ Returning a tensor with shape [m, n] """ def evt_permute(accum, alpha, C): F = alpha * accum F_permute = permute(F, indices=(0, 2, 1)) D = F_permute + C return D, F for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "alpha": 0.5, "C": self.fake_tensor(self.element, (l, n, m)), "F": self.fake_tensor(self.element, (l, m, n)), "D": self.fake_tensor(self.element, (l, n, m)), } launcher = EVTTestBed(self.element, evt_permute, example_inputs) input_keys = ["C", "alpha"] result_keys = ["D", "F"] 
launcher.verify((m, n, k), input_keys, result_keys, l) @unittest.skipIf(device_cc() != 90, "This unittest is for cc = Sm90 only") def test_permute_3(self): """ Returning a tensor with shape [m, n] """ def evt_permute(accum, alpha, C): F = alpha * accum F_permute = permute(F, indices=(1, 0, 2)) D = F_permute + C return D, F for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "alpha": 0.5, "C": self.fake_tensor(self.element, (m, l, n)), "F": self.fake_tensor(self.element, (l, m, n)), "D": self.fake_tensor(self.element, (m, l, n)), } launcher = EVTTestBed(self.element, evt_permute, example_inputs) input_keys = ["C", "alpha"] result_keys = ["D", "F"] launcher.verify((m, n, k), input_keys, result_keys, l) def test_reshape(self): """ Test reshape """ def evt_reshape(accum, alpha, TensorE): F = alpha * accum E_reshape = reshape(TensorE, new_shape=(512, 1)) D = F + E_reshape return D example_inputs = { "accum": self.fake_tensor(self.element, (self.l, self.m, self.n)), "alpha": 0.5, "TensorE": self.fake_tensor(self.element, (16, 32)), "D": self.fake_tensor(self.element, (self.l, self.m, self.n)), } launcher = EVTTestBed(self.element, evt_reshape, example_inputs) input_keys = ["alpha", "TensorE"] result_keys = ["D"] launcher.verify(self.problem_size, input_keys, result_keys, self.l) def test_reshape2(self): """ Test reshape """ def evt_reshape(accum, alpha, TensorE): F = alpha * accum F_reshape = reshape(F, new_shape=(2, 3, 512, 256)) D = F_reshape + TensorE return D example_inputs = { "accum": self.fake_tensor(self.element, (self.l, self.m, self.n)), "alpha": 0.5, "TensorE": self.fake_tensor(self.element, (2, 3, 1, self.n)), "D": self.fake_tensor(self.element, (2, 3, self.m, self.n)), } launcher = EVTTestBed(self.element, evt_reshape, example_inputs) input_keys = ["alpha", "TensorE"] result_keys = ["D"] launcher.verify(self.problem_size, input_keys, result_keys, self.l) if __name__ == '__main__': unittest.main()
cutlass/test/python/cutlass/evt/evt_layout_sm80_90.py/0
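A quick sanity check of the layout identities the tests above rely on (editor-added, plain PyTorch, independent of the CUTLASS EVT frontend): permuting with indices (0, 2, 1) twice restores the original layout, and reshaping a (16, 32) tensor to (512, 1) preserves element order.

# Editor-added sketch of the permute/reshape identities used by the tests above.
import torch

x = torch.arange(2 * 3 * 4).reshape(2, 3, 4)
assert torch.equal(x.permute(0, 2, 1).permute(0, 2, 1), x)

e = torch.arange(16 * 32).reshape(16, 32)
assert torch.equal(e.reshape(512, 1).flatten(), e.flatten())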
{ "file_path": "cutlass/test/python/cutlass/evt/evt_layout_sm80_90.py", "repo_id": "cutlass", "token_count": 3030 }
56
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# from math import prod import os import re import subprocess import torch from cutlass_library import ( DataType, DataTypeSize, GemmUniversalMode, LayoutType, OpcodeClass, ShortDataTypeNames, SwizzlingFunctor ) from cutlass.backend import compiler from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal from cutlass.backend.reduction_operation import ReductionArguments, ReductionOperation from cutlass.shape import GemmCoord, MatrixCoord from cutlass.utils.datatypes import torch_type class GemmUniversalLauncher: def __init__( self, operation, seed=2080, verification=True, iterations=500, compiler_mode= "nvcc", **kwargs, ) -> None: self.math_operation = operation.tile_description.math_instruction.math_operation self.verification = verification if compiler_mode == "nvcc": compiler.nvcc() elif compiler_mode == "nvrtc": compiler.nvrtc() else: raise Exception(f"Unexpected compiler string {compiler_mode}") op_list = [operation] if operation.arch < 90: # Split K via Python is currently only supported for pre-SM90 kernels self.reduction_operation: ReductionOperation = ReductionOperation( shape=MatrixCoord(4, 32 * operation.C.alignment), C=operation.C, element_accumulator=operation.tile_description.math_instruction.element_accumulator, element_compute=operation.epilogue_functor.element_epilogue, epilogue_functor=operation.epilogue_functor, count=operation.C.alignment, ) op_list.append(self.reduction_operation) compiler.add_module(op_list, bypass_cache=False) self.operation = operation self.dtype_A = torch_type(operation.A.element if not self.operation.switched else self.operation.B.element) self.dtype_B = torch_type(operation.B.element if not self.operation.switched else self.operation.A.element) self.dtype_C = 
torch_type(operation.C.element) self.dtype_D = torch_type(operation.epilogue_functor.element_output) element_size = min(DataTypeSize[operation.A.element], DataTypeSize[operation.B.element]) if element_size == 1: self.rand_max = 1 self.rand_min = 0 elif element_size <= 8: self.rand_max = 1 self.rand_min = -1 elif element_size == 16: self.rand_max = 4 self.rand_min = -4 else: self.rand_max = 8 self.rand_min = -8 self.seed = seed self.compute_type = operation.epilogue_functor.element_epilogue self.accumulator_type = operation.tile_description.math_instruction.element_accumulator def print_problem_size(self, p, mode, batch_count): if mode == GemmUniversalMode.Gemm: mode = "Gemm" elif mode == GemmUniversalMode.Batched: mode = "GemmBatched" elif mode == GemmUniversalMode.GemmSplitKParallel: mode = "GemmSplitKParallel" print(f"problem: {p.m}, {p.n}, {p.k}\n batch_count: {batch_count}\n mode: {mode}") def uniform_init(self, shape, dtype, layout): size = prod(shape) if dtype.is_floating_point: # Initialize data in FP32 and call convert to the data type we desire. # This is a workaround for the following error that occurs when attempting to # call uniform_ on a tensor with torch.float8_e4m3fn data: # RuntimeError: "check_uniform_bounds" not implemented for 'Float8_e4m3fn' data = torch.ceil( torch.empty(size=(size,), dtype=torch.float32, device="cuda").uniform_( self.rand_min - 0.5, self.rand_max - 0.5) ).to(dtype) else: # PyTorch does not currently support integer-typed matrix multiplications on GPU. # Fall back to CPU for integer type references. data = torch.empty(size=(size,), dtype=dtype, device="cpu").random_(self.rand_min, self.rand_max + 1) is_fp8 = dtype == getattr(torch, "float8_e4m3fn", -1) or dtype == dtype == getattr(torch, "float8_e5m2", -1) if dtype == torch.float64 or dtype == torch.float32 or is_fp8: data = data.to("cpu") data_ref = data.reshape(shape) if layout == LayoutType.RowMajor: data_cutlass = data_ref else: data_cutlass = data_ref.transpose(-1, -2).contiguous() data_cutlass = data_cutlass.to("cuda") # As of this writing, few operations in PyTorch are supported with FP8 data. # Thus, we perform computation in FP32 for FP8 reference checks. 
if is_fp8: data_ref = data_ref.to(torch.float32) return data_cutlass, data_ref def reference(self, problem_size, tensor_A, tensor_B, tensor_C, alpha, beta): # If any tensor is on CPU, place all tensors on CPU unless only # tensor C is on CPU # Handle mixed-input cases by casting to the larger data type and overriding # to whatever the data type of the larger type is if self.dtype_A != self.dtype_B: if DataTypeSize[self.operation.A.element] < DataTypeSize[self.operation.B.element]: tensor_A = tensor_A.to(self.dtype_B).to(tensor_B.device) else: tensor_B = tensor_B.to(self.dtype_A).to(tensor_A.device) devices = [x.device.type for x in [tensor_A, tensor_B]] if tensor_C is not None: devices.append(tensor_C.device.type) if "cpu" in devices and devices != ["cuda", "cuda", "cpu"]: device = torch.device("cpu") else: device = tensor_A.device tensor_A = tensor_A.to(device) tensor_B = tensor_B.to(device) if tensor_C is not None: tensor_C = tensor_C.to(device) dtype = torch_type(self.compute_type) alpha_torch = torch.tensor([alpha], device=device).to(dtype) beta_torch = torch.tensor([beta], device=device).to(dtype) tmp = tensor_A @ tensor_B tensor_D_ref = (alpha_torch * tmp) if tensor_C is not None: tensor_D_ref += (tensor_C * beta_torch) return tensor_D_ref.to(self.dtype_D) def run(self, mode, problem_size, batch_count=1, split_k_slices=1, alpha=1.0, beta=0.0): torch.random.manual_seed(self.seed) # Assign an actual batch count in cases where we are not running in batched mode. # This is to differentiate between the number of split K slices and the batch count, # which are overloaded within the single `batch_count` variable. if mode == GemmUniversalMode.Batched: true_batch_count = batch_count else: true_batch_count = 1 def transpose(layout): if layout == LayoutType.RowMajor: return LayoutType.ColumnMajor else: return LayoutType.RowMajor tensor_A, tensor_A_ref = self.uniform_init( (true_batch_count, problem_size.m, problem_size.k), self.dtype_A, self.operation.A.layout if not self.operation.switched else transpose(self.operation.B.layout), ) tensor_B, tensor_B_ref = self.uniform_init( (true_batch_count, problem_size.k, problem_size.n), self.dtype_B, self.operation.B.layout if not self.operation.switched else transpose(self.operation.A.layout), ) if self.dtype_C is not None: tensor_C, tensor_C_ref = self.uniform_init( (true_batch_count, problem_size.m, problem_size.n), self.dtype_C, self.operation.C.layout if not self.operation.switched else transpose(self.operation.C.layout), ) else: tensor_C = None tensor_C_ref = None tensor_D, _ = self.uniform_init( (true_batch_count, problem_size.m, problem_size.n), self.dtype_D, self.operation.C.layout if not self.operation.switched else transpose(self.operation.C.layout), ) tensor_D = torch.zeros_like(tensor_D) if self.compute_type in [DataType.s8, DataType.s32, DataType.u8, DataType.u32]: alpha = int(alpha) beta = int(beta) # # Launch kernel # arguments = GemmArguments( operation=self.operation, problem_size=problem_size, A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D, output_op=self.operation.epilogue_type(alpha, beta), gemm_mode=mode, split_k_slices=split_k_slices, batch=batch_count, ) if mode == GemmUniversalMode.GemmSplitKParallel: reduction_arguments = ReductionArguments( self.reduction_operation, problem_size=[problem_size.m, problem_size.n], partitions=split_k_slices, workspace=arguments.ptr_D, destination=tensor_D, source=tensor_C, output_op=self.reduction_operation.epilogue_type(alpha, beta), ) self.operation.run(arguments) if mode == 
GemmUniversalMode.GemmSplitKParallel:
            self.reduction_operation.run(reduction_arguments)

        passed = True

        if self.verification:
            if mode == GemmUniversalMode.GemmSplitKParallel:
                reduction_arguments.sync()

                # Free memory allocated by args because we are not
                # calling `arguments.sync()` in this case (which will free memory)
                arguments.free()
            else:
                arguments.sync()

            tensor_D_ref = self.reference(
                problem_size,
                tensor_A_ref,
                tensor_B_ref,
                tensor_C_ref,
                alpha,
                beta,
            )

            tensor_D_ref = tensor_D_ref.to('cuda')

            if self.operation.switched or self.operation.C.layout == LayoutType.ColumnMajor:
                tensor_D = tensor_D.transpose(-1, -2).contiguous()

            passed = tensor_D.equal(tensor_D_ref)

            try:
                assert passed
            except AssertionError:
                self.print_problem_size(problem_size, mode, batch_count)

        del arguments
        if mode == GemmUniversalMode.GemmSplitKParallel:
            del reduction_arguments

        return passed


def test_all_gemm(operation: "GemmOperationUniversal", testcase="universal", compilation_mode="nvcc"):
    passed = True

    minimum_operand_element_size = min(
        DataTypeSize[operation.A.element], DataTypeSize[operation.B.element]
    )

    opcode_class = operation.tile_description.math_instruction.opcode_class

    if opcode_class == OpcodeClass.Simt:
        alignment = 1
    else:
        alignment = 128 // minimum_operand_element_size

    alignment_m = alignment
    alignment_n = alignment
    alignment_k = alignment

    # INT8 alignment constraints
    if opcode_class == OpcodeClass.Simt:
        A_is_s8 = operation.A.element == DataType.s8
        B_is_s8 = operation.B.element == DataType.s8

        if A_is_s8 and operation.A.layout == LayoutType.ColumnMajor:
            alignment_m = 4
        if B_is_s8 and operation.B.layout == LayoutType.RowMajor:
            alignment_n = 4
        if A_is_s8 and B_is_s8 and (operation.A.layout == LayoutType.RowMajor or operation.B.layout == LayoutType.ColumnMajor):
            alignment_k = 4

    threadblock_k = operation.tile_description.threadblock_shape[2]

    assert testcase != "interleaved"

    supports_split_k = operation.arch < 90 and not operation.swizzling_functor == SwizzlingFunctor.StreamK

    if testcase == "multistage":
        modes = [GemmUniversalMode.Gemm]
        problem_size_m = [16, 528]
        problem_size_n = [16, 528]
        problem_size_k = [
            threadblock_k,
            threadblock_k * operation.tile_description.stages
            + operation.tile_description.math_instruction.instruction_shape[2],
        ]
        problem_alpha = [1.0]
        problem_beta = [0.0]
        batch_counts = [1]
    else:
        modes = [GemmUniversalMode.Gemm]
        batch_counts = [1, 2, 3, 5, 7]
        if supports_split_k:
            modes.append(GemmUniversalMode.GemmSplitKParallel)

        problem_size_m = [alignment_m, 512 - 3 * alignment_m]
        problem_size_n = [alignment_n, 512 - 2 * alignment_n]

        if operation.tile_description.stages is None:
            stages_for_k_calc = 7
        else:
            stages_for_k_calc = operation.tile_description.stages

        problem_size_k = [
            alignment_k,
            threadblock_k * stages_for_k_calc - alignment_k,
            threadblock_k * stages_for_k_calc * 3 - alignment_k,
        ]

        problem_alpha = [1.0]
        problem_beta = [2.0]

    testbed = GemmUniversalLauncher(operation, compiler_mode=compilation_mode)

    for mode in modes:
        for m in problem_size_m:
            for n in problem_size_n:
                for k in problem_size_k:
                    for batch_count in batch_counts:
                        for alpha in problem_alpha:
                            for beta in problem_beta:
                                # skip very small K problems
                                if testcase == "universal":
                                    if k // batch_count < 2 * threadblock_k:
                                        continue

                                problem_size = GemmCoord(m, n, k)

                                if supports_split_k:
                                    split_k_slices = batch_count
                                else:
                                    split_k_slices = 1

                                overridden_mode = mode
                                if mode == GemmUniversalMode.Gemm and batch_count > 1:
                                    overridden_mode = GemmUniversalMode.Batched

                                passed = testbed.run(
                                    overridden_mode,
                                    problem_size,
                                    batch_count,
split_k_slices, alpha, beta, ) if not passed: return False return passed
cutlass/test/python/cutlass/gemm/gemm_testbed.py/0
{ "file_path": "cutlass/test/python/cutlass/gemm/gemm_testbed.py", "repo_id": "cutlass", "token_count": 7721 }
57
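For orientation, the sketch below shows one way the launcher and sweep entry point defined in gemm_testbed.py above might be driven for a single, already-constructed GemmOperationUniversal. It is illustrative only, not part of the CUTLASS tree: the import path assumes the sketch sits next to gemm_testbed.py and that GemmCoord and GemmUniversalMode are importable from that module, and build_operation() is a hypothetical placeholder for however the operation object is created by the CUTLASS Python interface.

```python
# Minimal driver sketch (assumptions noted above; not part of the CUTLASS tree).
from gemm_testbed import (
    GemmCoord,
    GemmUniversalLauncher,
    GemmUniversalMode,
    test_all_gemm,
)


def build_operation():
    """Hypothetical placeholder: construct and return a GemmOperationUniversal.

    How the operation is built (via the CUTLASS Python interface) is outside
    the scope of this sketch.
    """
    raise NotImplementedError


def smoke_test(operation) -> bool:
    """Run one hand-picked problem, then the full sweep, for `operation`."""
    launcher = GemmUniversalLauncher(operation, compiler_mode="nvcc")

    # Single problem: plain GEMM mode, no batching or split-K, alpha=1, beta=0.
    single_ok = launcher.run(
        GemmUniversalMode.Gemm,
        GemmCoord(128, 128, 64),
        batch_count=1,
        split_k_slices=1,
        alpha=1.0,
        beta=0.0,
    )

    # Full sweep: exercises batched and, where supported, split-K parallel modes.
    sweep_ok = test_all_gemm(operation, testcase="universal", compilation_mode="nvcc")
    return single_ok and sweep_ok


if __name__ == "__main__":
    assert smoke_test(build_operation())
```

Because test_all_gemm overloads batch_count as both the batch size and the number of split-K slices, a standalone launcher.run call like the one above is a quick way to probe a single configuration before paying for the full Cartesian sweep.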
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Implicit GEMM testbed */ #pragma once #include <fstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "conv2d_problems.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/reference/device/convolution.h" #include "cutlass/core_io.h" #include "cutlass/util/tensor_view_io.h" #include "../cache_testbed_output.h" namespace test { namespace conv { namespace device { template <typename Conv2d, int InterleavedK> class InterleavedTestbedConv2d { public: using ElementA = typename Conv2d::ElementA; using LayoutA = typename Conv2d::LayoutA; using ElementB = typename Conv2d::ElementB; using LayoutB = typename Conv2d::LayoutB; using ElementC = typename Conv2d::ElementC; using LayoutC = typename Conv2d::LayoutC; using ElementAccumulator = typename Conv2d::ElementAccumulator; using ElementCompute = typename Conv2d::ElementCompute; using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp; static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator; /// Reduction kernel using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp >; using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>; using ReductionStrideIndex = typename ReductionDevice::StrideIndex; public: /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<ElementA, LayoutA> tensor_A; cutlass::HostTensor<ElementB, LayoutB> tensor_B; cutlass::HostTensor<ElementB, LayoutB> tensor_B_reordered; cutlass::HostTensor<ElementC, LayoutC> tensor_C; cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed; cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference; public: InterleavedTestbedConv2d( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> void initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { int scope; int bits = cutlass::sizeof_bits<Element>::value; if (bits <= 8) { scope = 2; } else if (bits == 16) { scope = 3; } else { scope = 8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope, -scope, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { 
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity()); } else { } } void initialize( cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) { tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size)); tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size)); tensor_B_reordered.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size)); tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); initialize_tensor(tensor_A.host_view(), init_A, seed); initialize_tensor(tensor_B.host_view(), init_B, seed * 17); initialize_tensor(tensor_C.host_view(), init_C, seed * 39); cutlass::reorder_convK<InterleavedK>( tensor_B_reordered.host_ref(), tensor_B.host_ref(), implicit_gemm_problem_size(kConvolutionalOperator, problem_size)); tensor_A.sync_device(); tensor_B.sync_device(); tensor_B_reordered.sync_device(); tensor_C.sync_device(); tensor_D_computed.sync_device(); tensor_D_reference.sync_device(); } bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerMultiprocessor < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::conv::Conv2dProblemSize const &problem_size, cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." << std::endl; } return true; } #if 0 //display conv2d problem size for debugging std::cout << problem_size << std::endl << "alpha, beta: (" << float(alpha) << ", " << float(beta) << ")" << std::endl << "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? 
"(serial)" : "(parallel)") << std::endl << std::endl; #endif initialize(problem_size); // configure the operator Conv2d conv2d_op; typename Conv2d::Arguments conv2d_args( problem_size, tensor_A.device_ref(), tensor_B_reordered.device_ref(), tensor_C.device_ref(), tensor_D_computed.device_ref(), {alpha, beta}, split_k_mode ); // find workspace requirement for parallel split-k reduction size_t workspace_size = Conv2d::get_workspace_size(conv2d_args); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get()); // conv2d operation with parallel split-k-mode if (split_k_mode == cutlass::conv::SplitKMode::kParallel) { // conv2d output is written to workspace in global memory conv2d_args.ref_D.reset(reinterpret_cast<ElementC*>(workspace.get())); // accumulate mma for each cta in k-dimension (1.0 * A * B) conv2d_args.output_op = {ElementCompute(1), ElementCompute(0)}; // update conv2d operator arguments status = conv2d_op.update(conv2d_args, workspace.get()); } EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } // run conv2d operator status = conv2d_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } if (split_k_mode == cutlass::conv::SplitKMode::kParallel) { // configure parallel reduction operator ReductionDevice reduction_op; typename ReductionDevice::Arguments reduction_args( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(), problem_size.split_k_slices, cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size), { reinterpret_cast<ElementAccumulator*> (workspace.get()), ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) }, { tensor_D_computed.device_data(), ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) }, { tensor_C.device_data(), ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) }, // apply alpha, beta to obtain the following equation alpha * ReduceAdd(A * B) + beta * C {alpha, beta} ); status = reduction_op.initialize(reduction_args, nullptr); EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } // run prallel reduction kernel status = reduction_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } } bool passed = false; tensor_D_computed.sync_host(); // // Reference check - support caching results // CachedTestKey cached_test_key = CreateCachedConv2dTestKey< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, ElementCompute >( kConvolutionalOperator, problem_size, alpha, beta, tensor_A.host_view(), tensor_B.host_view(), tensor_C.host_view() ); // // Look for the cached key // bool cached_result_loaded = false; CachedTestResult cached_test_result; std::string conv2d_result_cache_name = std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt"; if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) { CachedTestResultListing cached_results(conv2d_result_cache_name); auto cached = cached_results.find(cached_test_key); cached_result_loaded = cached.first; if (cached_result_loaded) { cached_test_result = cached.second; } } if (!cached_result_loaded) { #if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED cutlass::reference::device::Conv2d< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, 
ElementCompute, ElementAccumulator, cutlass::NumericConverterClamp<ElementC, ElementCompute> >( kConvolutionalOperator, problem_size, tensor_A.device_ref(), tensor_B.device_ref(), tensor_C.device_ref(), tensor_D_reference.device_ref(), alpha, beta); cudaError_t result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << " device reference error: " << cudaGetErrorString(result); // sync host (copy device data to host) for dumping error output in case of mismatches tensor_D_reference.sync_host(); #else cutlass::reference::host::Conv2d< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator, ElementC, cutlass::NumericConverterClamp<ElementC, ElementCompute> >( kConvolutionalOperator, problem_size, tensor_A.host_ref(), tensor_B.host_ref(), tensor_C.host_ref(), tensor_D_reference.host_ref(), alpha, beta); #endif if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) { cached_test_result.D = TensorHash(tensor_D_reference.host_view()); CachedTestResultListing cached_results(conv2d_result_cache_name); cached_results.append(cached_test_key, cached_test_result); cached_results.write(conv2d_result_cache_name); } } // if (!cached_result_loaded) uint32_t tensor_D_hash = TensorHash(tensor_D_computed.host_view()); if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) { passed = (tensor_D_hash == cached_test_result.D); EXPECT_EQ(tensor_D_hash, cached_test_result.D) << "Hash-based comparison failed for key:" << "\n" << cached_test_key << "\n"; } else { passed = cutlass::reference::host::TensorEquals( tensor_D_computed.host_view(), tensor_D_reference.host_view()); } EXPECT_TRUE(passed); if (!passed) { std::stringstream fname; fname << "error_Conv2d_ImplicitGemm_device_" << (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_") << (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" : (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" : "wgrad_")) << "ncxhwx_" << problem_size.N << "x" << problem_size.H << "x" << problem_size.W << "x" << problem_size.C << "_cxrskx_" << problem_size.K << "x" << problem_size.R << "x" << problem_size.S << "x" << problem_size.C << "_padding_" << problem_size.pad_h << "x" << problem_size.pad_w << "_stride_" << problem_size.stride_h << "x" << problem_size.stride_w << "_dilation_" << problem_size.dilation_h << "x" << problem_size.dilation_w << "_" << (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? 
"xcorr_" : "conv_") << Conv2d::ThreadblockShape::kM << "x" << Conv2d::ThreadblockShape::kN << "x" << Conv2d::ThreadblockShape::kK << "_" << Conv2d::WarpShape::kM << "x" << Conv2d::WarpShape::kN << "x" << Conv2d::WarpShape::kK << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << problem_size << std::endl; results << "\nA:\n" << tensor_A.host_view() << "\n" << "\nB:\n" << tensor_B.host_view() << "\n" << "\nC:\n" << tensor_C.host_view() << "\n"; results << "\nD reference (hash: " << cached_test_result.D << ")\n"; if (!cached_result_loaded) { results << tensor_D_reference.host_view() << "\n"; } results << "\nD computed (hash: " << tensor_D_hash << ")\n" << tensor_D_computed.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////////////// // TestAllConv: Runs cutlass::conv::device::ImplicitGemmConvolution operator and compares it with reference // TestAllConv runs conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes // Additionally, each conv2d test can provide conv problem sizes (conv_test_sizes) and blacklist of sizes // (conv_blacklist_sizes) ///////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename ImplicitGemm, int InterleavedK> bool TestAllInterleavedConv2d( const Conv2dProblemVector & conv_test_sizes = Conv2dProblemVector(), const Conv2dProblemVector & conv_blacklist_sizes = Conv2dProblemVector()) { bool passed = true; // // Testbed object // InterleavedTestbedConv2d<ImplicitGemm, InterleavedK> testbed; // // Get conv problem sizes to run conv operator // TestbedConv2dProblemSizes conv_problems(InterleavedK); // minimum channel size must be multiple of InterleavedK for interleaved layout // Vector of conv2d problem sizes to avoid duplicate runs Conv2dProblemVector conv_tested_sizes; Conv2dProblemVector const *problem_vectors[] = { &conv_test_sizes, // run user specified sizes &conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes &conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes #if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED &conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled #endif }; // Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0) for (Conv2dProblemVector const * problem_vector : problem_vectors) { ChannelDivisibilitySpecification channel_spec(InterleavedK); //input and output channels must be multiple of InterleavedK auto pruned_problem_vector = prune(*problem_vector, channel_spec); // Run conv testbed on default convolution sizes for(auto conv_problem : pruned_problem_vector) { // Skip blacklist and avoid duplicate problem sizes if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() || std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) { continue; } // // Procedurally disable certain cases // // CUTLASS DGRAD's unity stride specialization only support stride {1, 1} if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad) && (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport == cutlass::conv::StrideSupport::kUnity)) { if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) { continue; } } // // Test // // push back tested problem size to avoid re-running duplicates 
conv_tested_sizes.push_back(conv_problem); // test mode = xcross passed = testbed.run( conv_problem, cutlass::conv::SplitKMode::kSerial); if (!passed) { return false; } // test mode = convolution passed = testbed.run( conv_problem.reset_mode(cutlass::conv::Mode::kConvolution), cutlass::conv::SplitKMode::kSerial); if (!passed) { return false; } } } #if 0 // Sweep split-k-slice using serial and prallel reduction with non-unity alpha and non-zero beta for // a single conv2d problem size. Convolution unit tests take a long time to run so only sweep parameters // which are abolutely necessary to catch functional bugs. The below code does provide option to sweep // alpha and beta for local testing, but only runs one value for alpha and beta. cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size ( {1, 17, 11, 288}, // input size (NHWC) {160, 3, 3, 288}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1} // dilation (dilation_h, dilation_w) ); cutlass::conv::SplitKMode split_k_modes [] = { cutlass::conv::SplitKMode::kSerial, cutlass::conv::SplitKMode::kParallel, }; int split_k_slices[] = { 1, 2, 3, 4, 201 }; double problem_alpha[] = { 2.0 }; double problem_beta[] = { 2.0 }; for (auto split_k_mode : split_k_modes) { for (auto split_k_slice : split_k_slices) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { passed = testbed.run( conv2d_split_k_test_size.reset_split_k_slices(split_k_slice), split_k_mode, cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha), cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta)); if (!passed) { return false; } } } } } #endif return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace conv } // namespace test
cutlass/test/unit/conv/device/conv2d_testbed_interleaved.h/0
{ "file_path": "cutlass/test/unit/conv/device/conv2d_testbed_interleaved.h", "repo_id": "cutlass", "token_count": 8832 }
58
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide Depthwise Direct Conv interface */ #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_depthwise_fprop.h" #include "cutlass/conv/device/direct_convolution.h" #include "conv2d_testbed.h" #include "depthwise_conv2d_direct_conv_testbed.h" std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter3x3() { std::vector<cutlass::conv::Conv2dProblemSize> problems; for (int channels = 16; channels <= 512; channels += 16) { problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 8, 8, channels}, // input size (NHWC) {channels, 3, 3, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); // if(channels == 512 || channels == 16*14) problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 16, 16, channels}, // input size (NHWC) {channels, 3, 3, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {2, 2}, // stride (stride_h, stride_w) {2, 2}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); } return problems; } std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter5x5() { std::vector<cutlass::conv::Conv2dProblemSize> problems; for (int channels = 16; channels < 256; channels += 16) { problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 16, 16, channels}, // input size (NHWC) {channels, 5, 5, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 112, 112, channels}, // input size (NHWC) {channels, 5, 5, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 112, 112, channels}, // input size (NHWC) {channels, 5, 5, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {2, 2}, // stride (stride_h, stride_w) {2, 2}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); } return problems; } std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter5x37() { std::vector<cutlass::conv::Conv2dProblemSize> problems; for (int channels = 16; channels < 256; channels += 16) { problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 128, 128, channels}, // input size (NHWC) {channels, 5, 37, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 108, // split_k_slices channels // groups )); } return problems; } //////////////////////////////////////////////////////////////////////////////// TEST( SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16, 64x32_4_8x32_3x3) { using ElementInputA = cutlass::half_t; using ElementInputB 
= cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementComputeEpilogue = cutlass::half_t; using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU // SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm60; // This code section describes the groups a thread block will compute constexpr int groups_per_cta = 32; // This code section describes the output tile <N, P, Q, C> a thread block will compute using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>; // This code section describes the filter shape <R, S> using FilterShape = cutlass::MatrixShape<3, 3>; // Threadblock tile shape using ThreadblockShape = cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>; // This code section describes tile size a warp will computes using WarpShape = cutlass::gemm::GemmShape<8, groups_per_cta, FilterShape::kCount>; // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< 1, ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>; // Number of pipelines you want to use constexpr int NumStages = 4; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. kEpilogueElementsPerAccess, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha/beta in linear combination cutlass::epilogue::thread::ScaleType::Default>; using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm, cutlass::conv::StrideSupport::kStrided>::Kernel; using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>; /// Run all unit test sizes with device-level Conv2d instance EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>( DepthwiseFpropProblemSizes_filter3x3())); } //////////////////////////////////////////////////////////////////////////////// TEST( SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16, 64x64_3_16x64_5x5) { using ElementInputA = cutlass::half_t; using ElementInputB = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementComputeEpilogue = cutlass::half_t; using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU // SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm60; // This code section describes the groups a thread block will compute constexpr int groups_per_cta = 64; // This code section describes the output tile <N, P, Q, C> a thread block will compute using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>; // This code section describes the filter shape <R, S> using FilterShape = cutlass::MatrixShape<5, 5>; // Threadblock tile shape using ThreadblockShape = cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>; // This code section describes tile size a warp will computes using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>; // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< 1, ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>; // Number of pipelines you want to use constexpr int NumStages = 3; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. kEpilogueElementsPerAccess, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha/beta in linear combination cutlass::epilogue::thread::ScaleType::Default>; using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm, cutlass::conv::StrideSupport::kStrided>::Kernel; using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>; /// Run all unit test sizes with device-level Conv2d instance EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>( DepthwiseFpropProblemSizes_filter5x5())); } #if 0 //////////////////////////////////////////////////////////////////////////////// TEST( SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16, 64x32_3_16x32_5x37) { using ElementInputA = cutlass::half_t; using ElementInputB = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementComputeEpilogue = cutlass::half_t; using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU // SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm60; // This code section describes the groups a thread block will compute constexpr int groups_per_cta = 32; // This code section describes the output tile <N, P, Q, C> a thread block will compute using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>; // This code section describes the filter shape <R, S> using FilterShape = cutlass::MatrixShape<5, 37>; // Threadblock tile shape using ThreadblockShape = cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>; // This code section describes tile size a warp will computes using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>; // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< 1, ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>; // Number of pipelines you want to use constexpr int NumStages = 2; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. kEpilogueElementsPerAccess, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha/beta in linear combination cutlass::epilogue::thread::ScaleType::Default>; using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm, cutlass::conv::StrideSupport::kStrided>::Kernel; using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>; /// Run all unit test sizes with device-level Conv2d instance EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>( DepthwiseFpropProblemSizes_filter5x37())); } #endif
cutlass/test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu/0
{ "file_path": "cutlass/test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu", "repo_id": "cutlass", "token_count": 7430 }
59