/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief Templates implementing warp-level matrix multiply-accumulate operations.
*/

#pragma once

#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"

#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/thread/mma.h"

#include "cutlass/conv/convolution.h"
#include "cutlass/conv/thread/depthwise_mma.h"

#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"

#include "cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace conv {
namespace warp {

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
  /// Size of the Gemm problem - concept: gemm::GemmShape<>
  typename Shape_,
  /// Data type of A elements
  typename ElementA_,
  /// Layout of A matrix (concept: MatrixLayout)
  typename LayoutA_,
  /// Data type of B elements
  typename ElementB_,
  /// Layout of B matrix (concept: MatrixLayout)
  typename LayoutB_,
  /// Element type of C matrix
  typename ElementC_,
  /// Layout of C matrix (concept: MatrixLayout)
  typename LayoutC_,
  /// Shape of the warp in units of thread (concept: MmaSimtPolicy)
  typename Policy_,
  /// Number of partitions along K dimension
  int PartitionsK = 1,
  /// Complex transformation on operand A
  ComplexTransform TransformA = ComplexTransform::kNone,
  /// Complex transformation on operand B
  ComplexTransform TransformB = ComplexTransform::kNone,
  /// Used for partial specialization
  typename Enable = bool>
class MmaDepthwiseSimt
    : public cutlass::gemm::warp::
          MmaSimt<Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, Policy_> {

  using Base = cutlass::gemm::warp::
      MmaSimt<Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, Policy_>;

public:
  /// Shape of warp-level matrix operation (concept: GemmShape)
  using Shape = Shape_;

  /// Data type of multiplicand A
  using ElementA = ElementA_;

  /// Layout of multiplicand A
  using LayoutA = LayoutA_;

  /// Data type of multiplicand B
  using ElementB = ElementB_;

  /// Layout of multiplicand B
  using LayoutB = LayoutB_;

  /// Data type of accumulator matrix C
  using ElementC = ElementC_;

  /// Layout of accumulator matrix C
  using LayoutC = LayoutC_;

  /// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
  using Policy = Policy_;

  /// Indicates class of matrix operator
  using OperatorClass = arch::OpClassSimt;

  /// Hard-coded for now
  using ArchTag = arch::Sm50;

  /// Complex transform on A operand
  static ComplexTransform const kTransformA = TransformA;

  /// Complex transform on B operand
  static ComplexTransform const kTransformB = TransformB;

public:
  /// Iterates over the B operand in memory
  using IteratorB = cutlass::conv::warp::DepthwiseMmaSimtTileIterator<
      MatrixShape<Policy::LaneMmaShape::kK, Shape::kN>,
      cutlass::gemm::Operand::kB,
      ElementB,
      LayoutB,
      Policy,
      PartitionsK,
      Shape::kK>;

  /// Storage for B tile
  using FragmentB = typename IteratorB::Fragment;

  /// Storage for transformed B tile
  using TransformedFragmentB = FragmentB;

public:
  //
  // Methods
  //

  /// Ctor
  CUTLASS_DEVICE
  MmaDepthwiseSimt() : Base() {}
};

/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
  /// Size of the Gemm problem - concept: gemm::GemmShape<>
  typename Shape_,
  /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
  typename FilterShape_,
  /// Shape of the output tile computed by thread - concept: conv::TensorNHWCShape<>
  typename ThreadOutputShape_,
  /// Shape of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
  typename ThreadBlockOutputShape_,
  /// Data type of A elements
  typename ElementA_,
  /// Layout of A matrix (concept: MatrixLayout)
  typename LayoutA_,
  /// Data type of B elements
  typename ElementB_,
  /// Layout of B matrix (concept: MatrixLayout)
  typename LayoutB_,
  /// Element type of C matrix
  typename ElementC_,
  /// Layout of C matrix (concept: MatrixLayout)
  typename LayoutC_,
  /// Shape of the warp in units of thread (concept: MmaSimtPolicy)
  typename Policy_,
  /// Iterator algo type
  conv::IteratorAlgorithm IteratorAlgorithm_ = IteratorAlgorithm::kAnalytic,
  /// Stride ( MatrixShape<Height, Width> )
  typename StrideShape_ = cutlass::MatrixShape<-1, -1>,
  /// Dilation ( MatrixShape<Height, Width> )
  typename DilationShape_ = cutlass::MatrixShape<-1, -1>,
  /// Activation Shape loaded by threadblock
  typename ActivationShape_ = cutlass::conv::TensorNHWCShape<-1, -1, -1, -1>,
  /// Number of partitions along K dimension
  int PartitionsK = 1,
  /// Complex transformation on operand A
  ComplexTransform TransformA = ComplexTransform::kNone,
  /// Complex transformation on operand B
  ComplexTransform TransformB = ComplexTransform::kNone,
  /// Used for partial specialization
  typename Enable = bool>
class MmaDepthwiseDirectConvSimt {
public:
  /// Shape of warp-level matrix operation (concept: GemmShape)
  using Shape = Shape_;

  /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
  using FilterShape = FilterShape_;

  /// Shape of the output tile computed by thread - concept: conv::TensorNHWCShape<>
  using ThreadOutputShape = ThreadOutputShape_;

  /// Shape of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
  using ThreadBlockOutputShape = ThreadBlockOutputShape_;

  /// Data type of multiplicand A
  using ElementA = ElementA_;

  /// Layout of multiplicand A
  using LayoutA = LayoutA_;

  /// Data type of multiplicand B
  using ElementB = ElementB_;

  /// Layout of multiplicand B
  using LayoutB = LayoutB_;

  /// Data type of accumulator matrix C
  using ElementC = ElementC_;

  /// Layout of accumulator matrix C
  using LayoutC = LayoutC_;

  /// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
  using Policy = Policy_;

  /// Iterator algo type
  static conv::IteratorAlgorithm const IteratorAlgorithm = IteratorAlgorithm_;

  /// Stride ( MatrixShape<Height, Width> )
  using StrideShape = StrideShape_;

  /// Dilation ( MatrixShape<Height, Width> )
  using DilationShape = DilationShape_;

  /// Activation Shape loaded by threadblock
  using ActivationShape = ActivationShape_;

  /// Indicates class of matrix operator
  using OperatorClass = arch::OpClassSimt;

  /// Hard-coded for now
  using ArchTag = arch::Sm50;

  /// Complex transform on A operand
  static ComplexTransform const kTransformA = TransformA;

  /// Complex transform on B operand
  static ComplexTransform const kTransformB = TransformB;

  static constexpr bool use_dp4a =
      (platform::is_same<layout::ColumnMajorInterleaved<4>, LayoutA>::value ||
       platform::is_same<layout::RowMajorInterleaved<4>, LayoutA>::value) &&
      platform::is_same<ElementA, int8_t>::value &&
      platform::is_same<ElementB, int8_t>::value;

  using dp4a_type = typename platform::conditional<use_dp4a, int8_t, bool>::type;

  /// Thread-level matrix multiply accumulate operator
  using ThreadMma = cutlass::conv::thread::DepthwiseDirectConvElementwiseInnerProduct<
      cutlass::gemm::GemmShape<
          Shape::kM / Policy::WarpShape::kRow,     // number of output pixels processed per thread
          Shape::kN / Policy::WarpShape::kColumn,  // number of channels processed per thread
          1>,
      ElementA,
      ElementB,
      ElementC,
      arch::OpMultiplyAdd,
      dp4a_type>;

  /// Underlying matrix multiply operator (concept: arch::Mma)
  using ArchMmaOperator = typename ThreadMma::ArchMmaOperator;

  /// Indicates math operator
  using MathOperator = typename ArchMmaOperator::Operator;

  /// Shape of the underlying instruction
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, use_dp4a ? 4 : 1>;

public:
  /// Iterates over the A operand in memory
  using IteratorA = cutlass::conv::warp::DepthwiseDirect2dConvSimtTileIterator<
      MatrixShape<Shape::kM, Shape::kN>,  // <output tile=(P*Q), output channels> per warp
      FilterShape,
      ThreadOutputShape,
      ThreadBlockOutputShape,
      cutlass::gemm::Operand::kA,
      ElementA,
      Policy,
      IteratorAlgorithm,
      StrideShape,
      DilationShape,
      ActivationShape,
      PartitionsK,
      Shape::kK>;

  /// Storage for A tile
  using FragmentA = typename IteratorA::Fragment;

  /// Storage for transformed A tile
  using TransformedFragmentA = FragmentA;

  /// Iterates over the B operand in memory
  using IteratorB = cutlass::gemm::warp::MmaSimtTileIterator<
      MatrixShape<1, Shape::kN>,
      cutlass::gemm::Operand::kB,
      ElementB,
      LayoutB,
      Policy,
      PartitionsK,
      Shape::kK>;

  /// Storage for B tile
  using FragmentB = typename IteratorB::Fragment;

  /// Storage for transformed B tile
  using TransformedFragmentB = FragmentB;

  /// Iterates over the C operand in memory
  using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator<
      MatrixShape<Shape::kM, Shape::kN>,
      cutlass::gemm::Operand::kC,
      ElementC,
      LayoutC,
      Policy>;

  /// Storage for C tile
  using FragmentC = typename ThreadMma::FragmentC;

public:
  //
  // Methods
  //

  /// Ctor
  CUTLASS_DEVICE
  MmaDepthwiseDirectConvSimt() {}

  /// Performs a warp-level matrix multiply-accumulate operation
  CUTLASS_DEVICE
  void operator()(
      FragmentC &d,
      FragmentA a,
      FragmentB b,
      FragmentC const &c,
      int group_idx = 0) const {

    ThreadMma mma;

    mma(d, a, b, c);
  }

  /// Transform the mma operands to the required types
  CUTLASS_DEVICE
  void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
                 FragmentA const &A, FragmentB const &B) const {

    dst_A = A;
    dst_B = B;
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace warp
} // namespace conv
} // namespace cutlass
// Source: cutlass/include/cutlass/conv/warp/mma_depthwise_simt.h
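The thread-level operator named above, DepthwiseDirectConvElementwiseInnerProduct, differs from a regular GEMM inner product in that each output channel accumulates only over its own input channel. The stand-alone C++ sketch below models that per-thread contract for illustration only; it is not the CUTLASS implementation, and the pixel/channel counts are arbitrary assumptions.

#include <cstdio>

// Conceptual model of a depthwise "elementwise inner product".
// The fragment sizes below are illustrative assumptions.
constexpr int kPixels   = 2;  // output pixels held by one thread
constexpr int kChannels = 4;  // channels held by one thread

void depthwise_elementwise_mac(float d[kPixels][kChannels],
                               float const a[kPixels][kChannels],
                               float const b[kChannels]) {
  for (int p = 0; p < kPixels; ++p) {
    for (int c = 0; c < kChannels; ++c) {
      // No reduction across channels: channel c of the output depends only on
      // channel c of the activation and filter, unlike a GEMM dot product.
      d[p][c] += a[p][c] * b[c];
    }
  }
}

int main() {
  float d[kPixels][kChannels] = {};
  float a[kPixels][kChannels] = {{1, 2, 3, 4}, {5, 6, 7, 8}};
  float b[kChannels]          = {0.5f, 1.0f, 1.5f, 2.0f};
  depthwise_elementwise_mac(d, a, b);
  std::printf("%g %g\n", d[0][0], d[1][3]);  // prints 0.5 16
  return 0;
}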
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 **************************************************************************************************/
/*! \file
    \brief Functor performing conversion operations used by epilogues.
*/

#pragma once

#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace epilogue {
namespace thread {

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Converts the result without other operations
///
template <
  typename ElementOutput_,                        ///< Data type used to load and store tensors
  int Count,                                      ///< Number of elements computed per operation
  typename ElementAccumulator_ = ElementOutput_,  ///< Accumulator data type
  FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class Convert {
public:

  using ElementOutput = ElementOutput_;
  using ElementAccumulator = ElementAccumulator_;
  using ElementCompute = ElementAccumulator_;

  static int const kCount = Count;

  using FragmentOutput = Array<ElementOutput, kCount>;
  using FragmentAccumulator = Array<ElementAccumulator, kCount>;
  using ComputeFragment = FragmentAccumulator;

  static FloatRoundStyle const kRound = Round;

  static bool const kIsHeavy = false;

  /// Host-constructable parameters structure
  struct Params {

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params() {}
  };

public:

  /// Constructs the function object, possibly loading from pointers in host memory
  CUTLASS_HOST_DEVICE
  Convert(Params const &params = Params()) {
  }

  /// Functionally required for serial reduction in the epilogue
  CUTLASS_HOST_DEVICE
  void set_k_partition(int k_partition, int k_partition_count) {
  }

  /// Returns true if source is needed based on state of runtime arguments
  CUTLASS_HOST_DEVICE
  constexpr bool is_source_needed() const {
    return false;
  }

  /// Constexpr function to enable the compiler to optimize away the source loading if it is
  /// never needed.
  CUTLASS_HOST_DEVICE
  constexpr bool is_source_ever_needed() const {
    return false;
  }

  /// Converts the accumulator fragment to the output numeric type; the source and uniform
  /// arguments are accepted for interface compatibility but are not used.
  CUTLASS_HOST_DEVICE
  FragmentOutput operator()(
    FragmentAccumulator const &accumulator,
    FragmentOutput const &source = FragmentOutput(),
    ElementCompute uniform = ElementCompute(0)) const {

    // Convert to destination numeric type
    NumericArrayConverter<ElementOutput, ElementAccumulator, kCount, Round> destination_converter;

    return destination_converter(accumulator);
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace thread
} // namespace epilogue
} // namespace cutlass
// Source: cutlass/include/cutlass/epilogue/thread/conversion_op.h
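A small usage sketch of the Convert functor defined above, assuming the CUTLASS headers are on the include path. It only exercises the conversion path in operator(); the element types and fragment count are illustrative choices, not values required by the library.

#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/half.h"

// Convert an 8-element float accumulator fragment to half precision.
// The functor performs no scaling; it simply applies NumericArrayConverter.
using ConvertOp = cutlass::epilogue::thread::Convert<
    cutlass::half_t,  // ElementOutput
    8,                // Count (elements per operation)
    float>;           // ElementAccumulator

void convert_fragment_example() {
  ConvertOp convert_op;                    // Params carry no state for this functor

  ConvertOp::FragmentAccumulator accum;    // Array<float, 8>
  for (int i = 0; i < ConvertOp::kCount; ++i) {
    accum[i] = float(i) * 0.25f;
  }

  // is_source_needed() returns false, so no source fragment has to be supplied.
  ConvertOp::FragmentOutput out = convert_op(accum);
  (void)out;
}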
/***************************************************************************************************
 * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 **************************************************************************************************/
/*! \file
    \brief Default configuration for epilogue computing absolute maximum of output and auxiliary
        outputs.
*/

#pragma once

#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"

#include "cutlass/gemm/gemm.h"

#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/epilogue_with_absmax.h"

#include "cutlass/layout/permute.h"

////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace epilogue {
namespace threadblock {

////////////////////////////////////////////////////////////////////////////////

/// Defines sensible defaults for absolute-maximum-computing epilogues with TensorOps
template <
  typename Shape,
  typename WarpMmaTensorOp,
  int PartitionsK,
  typename ElementOutput,
  typename ElementAuxOutput,
  typename ElementVector,
  typename OutputOp,
  int ElementsPerAccess,
  bool ScatterD = false,
  typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueWithAbsMax {

  /// Use defaults related to the existing epilogue
  using Base = DefaultEpilogueTensorOp<
    Shape,
    WarpMmaTensorOp,
    PartitionsK,
    OutputOp,
    ElementsPerAccess
  >;

  //
  // Stores the output
  //
  using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
    typename Base::OutputTileThreadMap,
    ElementOutput,
    ScatterD,
    PermuteDLayout
  >;

  //
  // Stores the auxiliary output
  //
  using AuxOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
    typename Base::OutputTileThreadMap,
    ElementAuxOutput,
    ScatterD,
    PermuteDLayout
  >;

  /// Define the epilogue
  using Epilogue = EpilogueWithAbsMax<
    Shape,
    WarpMmaTensorOp,
    PartitionsK,
    OutputTileIterator,
    AuxOutputTileIterator,
    ElementVector,
    typename Base::AccumulatorFragmentIterator,
    typename Base::WarpTileIterator,
    typename Base::SharedLoadIterator,
    OutputOp,
    typename Base::Padding,
    Base::kFragmentsPerIteration
  >;
};

////////////////////////////////////////////////////////////////////////////////

} // namespace threadblock
} // namespace epilogue
} // namespace cutlass

////////////////////////////////////////////////////////////////////////////////
// Source: cutlass/include/cutlass/epilogue/threadblock/default_epilogue_with_absmax.h
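A full instantiation of DefaultEpilogueWithAbsMax requires a concrete warp-level tensor-op MMA and output operator, so the sketch below instead models, in plain scalar C++, what an absolute-maximum-computing epilogue produces: the usual linear combination plus a running amax of the result. This is a conceptual illustration under assumed values, not the CUTLASS implementation.

#include <algorithm>
#include <cmath>
#include <cstdio>

// Scalar model of an epilogue that, in addition to D = alpha * accumulator + beta * C,
// records the absolute maximum of the produced output. Kernels that emit narrow types
// such as FP8 use this running amax to derive scale factors for a subsequent pass.
struct LinearCombinationWithAbsMax {
  float alpha;
  float beta;
  float abs_max = 0.0f;  // running absolute maximum of all produced outputs

  float operator()(float accumulator, float source) {
    float d = alpha * accumulator + beta * source;
    abs_max = std::max(abs_max, std::fabs(d));
    return d;
  }
};

int main() {
  LinearCombinationWithAbsMax epilogue{/*alpha=*/2.0f, /*beta=*/1.0f};
  float acc[4] = {1.0f, -3.5f, 0.25f, 2.0f};
  float src[4] = {0.0f,  1.0f, -2.0f, 0.5f};
  float out[4];
  for (int i = 0; i < 4; ++i) {
    out[i] = epilogue(acc[i], src[i]);
  }
  std::printf("amax = %g\n", epilogue.abs_max);  // prints 6
  return 0;
}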
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMM/CONV to store accumulator in shared memory after applying scale, bias loaded from global memory and element-wise operations. This Epilogue is typically used in fused GEMM/CONV to stage the intermediate accumulator. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename SmemTileIterator_, ///< Shared memory Tile iterator to output to shared memory typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename ScaleBiasIterator_, ///< Iterator to load scale and bias from global memory typename OutputOp_ ///< Output operator > class EpilogueSmemAccumulator { public: using SmemTileIterator = SmemTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using ScaleBiasIterator = ScaleBiasIterator_; using OutputOp = OutputOp_; /// Fragment of accumulator tile using FragmentAccumulator = typename AccumulatorFragmentIterator::Fragment; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Fragment of Scale and Bias loaded from global memory using FragmentScaleBias = typename ScaleBiasIterator::Fragment; static const bool PerChannelScale = (OutputOp::kScale == epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling); /// Constructor CUTLASS_DEVICE EpilogueSmemAccumulator() {} /// Streams the result to shared memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory AccumulatorTile const &accumulator, ///< Complete warp-level accumulator tile ScaleBiasIterator scale_iterator, ///< iterator for scale vector in global memory ScaleBiasIterator bias_iterator) { ///< iterator for bias vector in global memory // Fragment to load scale bias from global memory FragmentScaleBias tb_frag_scale; FragmentScaleBias tb_frag_bias; /// Fragment Iterator to load slice of accumulator tile AccumulatorFragmentIterator frag_iterator_accum(accumulator); FragmentAccumulator tb_frag_accum; /// Epilogue output fragment typename SmemTileIterator::Fragment tb_frag_smem; /// Load scale and bias from global memory if(PerChannelScale) scale_iterator.load(tb_frag_scale); bias_iterator.load(tb_frag_bias); /// Iterate over the accumulator tile and store to shared memory CUTLASS_PRAGMA_UNROLL for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) { CUTLASS_PRAGMA_UNROLL for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) { using AccumulatorAccessType = typename OutputOp::FragmentAccumulator; using ScaleBiasAccessType = typename OutputOp::FragmentScaleBias; using FragmentSmemAccessType = typename OutputOp::FragmentOutput; ScaleBiasAccessType const * scale_frag_ptr = reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_scale); ScaleBiasAccessType const * bias_frag_ptr = reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_bias); FragmentSmemAccessType * smem_frag_ptr = reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem); CUTLASS_PRAGMA_UNROLL for 
(int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) { frag_iterator_accum.load(tb_frag_accum); ++frag_iterator_accum; AccumulatorAccessType const * accumulator_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum); const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount; CUTLASS_PRAGMA_UNROLL for (int it = 0; it < kOutputIterations; it++) { smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it], scale_frag_ptr[cid * kOutputIterations + it], bias_frag_ptr[cid * kOutputIterations + it]); } } smem_iterator.store(tb_frag_smem); ++smem_iterator; } } } /// Streams the result to shared memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory AccumulatorTile const &accumulator) { ///< Complete warp-level accumulator tile /// Fragment Iterator to load slice of accumulator tile AccumulatorFragmentIterator frag_iterator_accum(accumulator); FragmentAccumulator tb_frag_accum; /// Epilogue output fragment typename SmemTileIterator::Fragment tb_frag_smem; /// Iterate over the accumulator tile and store to shared memory CUTLASS_PRAGMA_UNROLL for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) { CUTLASS_PRAGMA_UNROLL for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) { using AccumulatorAccessType = typename OutputOp::FragmentAccumulator; using FragmentSmemAccessType = typename OutputOp::FragmentOutput; FragmentSmemAccessType * smem_frag_ptr = reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem); CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) { frag_iterator_accum.load(tb_frag_accum); ++frag_iterator_accum; AccumulatorAccessType const * accumulator_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum); const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount; CUTLASS_PRAGMA_UNROLL for (int it = 0; it < kOutputIterations; it++) { smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it]); } } smem_iterator.store(tb_frag_smem); ++smem_iterator; } } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
// Source: cutlass/include/cutlass/epilogue/threadblock/epilogue_smem_accumulator.h
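Per the file's own description, EpilogueSmemAccumulator stages the accumulator tile into shared memory after applying a per-channel scale and bias loaded from global memory, so a fused second GEMM/CONV stage can consume it. The scalar sketch below models only that elementwise contract; the buffer sizes are illustrative assumptions and the real code operates on vectorized fragments through tile iterators.

#include <cstdio>

// Scalar model of the fused-kernel staging pattern: each accumulator element is
// combined with a per-channel scale and bias and written to an intermediate buffer
// (shared memory in the real kernel).  Sizes are illustrative assumptions.
constexpr int kRows     = 2;
constexpr int kChannels = 4;

void stage_accumulator(float const acc[kRows][kChannels],
                       float const scale[kChannels],
                       float const bias[kChannels],
                       float smem[kRows][kChannels]) {
  for (int r = 0; r < kRows; ++r) {
    for (int c = 0; c < kChannels; ++c) {
      // Per-channel scale and bias, as applied by the output operator before the
      // fragment is stored through the shared-memory tile iterator.
      smem[r][c] = scale[c] * acc[r][c] + bias[c];
    }
  }
}

int main() {
  float acc[kRows][kChannels]  = {{1, 2, 3, 4}, {5, 6, 7, 8}};
  float scale[kChannels]       = {1.0f, 0.5f, 2.0f, 1.0f};
  float bias[kChannels]        = {0.0f, 1.0f, -1.0f, 0.25f};
  float smem[kRows][kChannels] = {};
  stage_accumulator(acc, scale, bias, smem);
  std::printf("%g %g\n", smem[0][2], smem[1][1]);  // prints 5 4
  return 0;
}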
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Metaprogram for determining the mapping of output elements to threads for epilogue tiles. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tuple defining point in output tile template < int Column, int Row, int Group, int Cluster, int Tile > struct OutputTileShape { static int const kColumn = Column; static int const kRow = Row; static int const kGroup = Group; static int const kCluster = Cluster; static int const kTile = Tile; static int const kCount = kColumn * kRow * kGroup * kCluster * kTile; }; //////////////////////////////////////////////////////////////////////////////// template <typename Iterations, typename Delta> struct OutputTileThreadMapHelpers { /// Determines the iteration index of a vector access according to the thread map CUTLASS_HOST_DEVICE static void iteration_index( int &column_idx, int &row_idx, int &group_idx, int &cluster_idx, int &tile_idx, int iter_idx) { column_idx = iter_idx % Iterations::kColumn; int residual = iter_idx / Iterations::kColumn; row_idx = residual % Iterations::kRow; residual = residual / Iterations::kRow; group_idx = residual % Iterations::kGroup; residual = residual / Iterations::kGroup; cluster_idx = residual % Iterations::kCluster; tile_idx = residual / Iterations::kCluster; } /// Computes the offset of a given vector access CUTLASS_HOST_DEVICE static MatrixCoord iteration_offset(int iter_idx) { int column_idx; int row_idx; int group_idx; int cluster_idx; int tile_idx; iteration_index(column_idx, row_idx, group_idx, cluster_idx, tile_idx, iter_idx); return MatrixCoord( row_idx * Delta::kRow + group_idx * Delta::kGroup + cluster_idx * Delta::kCluster + tile_idx * Delta::kTile, column_idx * Delta::kColumn); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ThreadMap_, typename Shape_, typename Iterations_, typename Delta_, typename Count_ > struct OutputTileThreadMap : public OutputTileThreadMapHelpers<Iterations_, Delta_> { /// Conventional thread map (concept: ThreadMap) using ThreadMap = ThreadMap_; /// Number of threads participating in the operation static int const kThreads = ThreadMap::kThreads; /// Number of scalar elements per access static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; /// Shape of the tile using Shape = Shape_; /// Iterations performed by each thread using Iterations = Iterations_; /// Delta between accesses using Delta = Delta_; /// Number of iterator iterations using Count = Count_; /// Initial offset function CUTLASS_HOST_DEVICE static MatrixCoord initial_offset(int thread_idx) { using Index = typename layout::PitchLinearCoord::Index; layout::PitchLinearCoord coord = ThreadMap::initial_offset(thread_idx); Index cluster = coord.strided() / (Shape::kGroup * Shape::kRow); Index cluster_residual = coord.strided() % (Shape::kGroup * Shape::kRow); Index group = cluster_residual / (Shape::kRow); Index row = cluster_residual % (Shape::kRow); return MatrixCoord{ row + group * Shape::kRow * Count::kRow + cluster * Shape::kGroup * Count::kGroup * Shape::kRow * Count::kRow, coord.contiguous() }; } }; //////////////////////////////////////////////////////////////////////////////// namespace detail { /// 
RowArrangement determines how one or more warps cover a region of consecutive rows. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize, bool Is2dTile > struct RowArrangement; /// RowArrangement in which each warp's access is a 1D tiled arrangement. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize > struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, false> { static int const kWarpSize = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; static int const kIterationsRow = 1; static int const kDeltaRow = 1; static int const kIterationsColumn = Shape::kColumn / kElementsPerAccess / kWarpSize; static int const kDeltaColumn = kWarpSize * kElementsPerAccess; static int const kAccessWidth = kWarpSize; static int const kAccessRows = 1; static int const kWarpPartitionsRow = 1; static int const kWarpPartitionsColumn = WarpsRemaining; }; /// RowArrangement in which each warp's access is a 2D tiled arrangement. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize > struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, true> { static int const kMemoryAccessSize = 256; // Preferred access size static int const kWarpSize = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; struct Detail { static int const kShapeRow = Shape::kRow / WarpsRemaining; static int const kShapeWidth = Shape::kColumn / kElementsPerAccess; static int const kTargetMemoryAccessWidth = kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8); static int const kTargetAccessRows = kWarpSize / kTargetMemoryAccessWidth; }; static int const kAccessWidth = (Detail::kTargetAccessRows > Detail::kShapeRow ? kWarpSize / Detail::kShapeRow : const_min( Detail::kShapeWidth, const_min(kWarpSize, kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8)) )); static int const kAccessRows = (Detail::kTargetAccessRows > Detail::kShapeRow ? 
Detail::kShapeRow : const_min(Shape::kRow, kWarpSize / kAccessWidth)); static int const kIterationsRow = Detail::kShapeRow / kAccessRows; static int const kDeltaRow = kAccessRows; static int const kIterationsColumn = Detail::kShapeWidth / kAccessWidth; static int const kDeltaColumn = kAccessWidth * kElementsPerAccess; static_assert( kAccessWidth * kElementsPerAccess <= Shape::kColumn, "Accessing too many elements per access"); static_assert( kIterationsColumn > 0, "Iteration Count Column must be > 0" ); static_assert( kIterationsRow > 0, "Iteration Count Row must be > 0" ); static int const kWarpPartitionsRow = 1; static int const kWarpPartitionsColumn = 1; }; } //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 4D space across warps to achieve several performance /// objectives: /// /// - coalesced memory accesses in units of 128 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template < typename Shape_, typename Count_, int Threads, int ElementsPerAccess, int ElementSize > struct OutputTileOptimalThreadMap { using Shape = Shape_; using Count = Count_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail { // Clusters static int const kIterationsCluster = ((Shape::kCluster > kWarpCount) ? Shape::kCluster / kWarpCount : 1); static int const kDeltaCluster = ((Shape::kCluster > kWarpCount) ? Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup * Shape::kCluster / kIterationsCluster : 1); static int const kCompactedDeltaCluster = ((Shape::kCluster > kWarpCount) ? Shape::kRow * Shape::kGroup * Shape::kCluster / kIterationsCluster : 1); static int const kWarpPartitionsCluster = ((Shape::kCluster > kWarpCount) ? kWarpCount : kWarpCount / Shape::kCluster); static int const kWarpsRemainingForGroups = ((Shape::kCluster > kWarpCount) ? 1 : kWarpCount / Shape::kCluster); // Groups static int const kIterationsGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kGroup / kWarpsRemainingForGroups : 1); static int const kDeltaGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kRow * Count::kRow * Shape::kGroup / kIterationsGroup : 1); static int const kCompactedDeltaGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kRow * Shape::kGroup / kIterationsGroup : 1); static int const kWarpPartitionsGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? 1 : kWarpsRemainingForGroups / Shape::kGroup); static int const kWarpsRemainingForRows = ((Shape::kGroup > kWarpsRemainingForGroups) ? 
1 : kWarpsRemainingForGroups / Shape::kGroup); // Rows using RowArrangement = detail::RowArrangement< Shape, kWarpsRemainingForRows, kElementsPerAccess, kElementSize, (Shape::kRow > kWarpsRemainingForRows) >; // Warp partitions using WarpPartitions = OutputTileShape< RowArrangement::kWarpPartitionsColumn, RowArrangement::kWarpPartitionsRow, kWarpPartitionsGroup, kWarpPartitionsCluster, 1>; static int const kAccessWidth = RowArrangement::kAccessWidth; static int const kAccessRows = RowArrangement::kAccessRows; }; // // Output // using Iterations = OutputTileShape< Detail::RowArrangement::kIterationsColumn, Detail::RowArrangement::kIterationsRow, Detail::kIterationsGroup, Detail::kIterationsCluster, 1>; using Delta = OutputTileShape< Detail::RowArrangement::kDeltaColumn, Detail::RowArrangement::kDeltaRow, Detail::kDeltaGroup, Detail::kDeltaCluster, 1>; /// Initial offset function CUTLASS_DEVICE static MatrixCoord initial_offset(int thread_idx) { // int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0); int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster; int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster; int group_idx = residual_cluster / Detail::WarpPartitions::kGroup; int residual_group = residual_cluster % Detail::WarpPartitions::kGroup; int row_idx = residual_group / Detail::WarpPartitions::kRow; int col_idx = residual_group % Detail::WarpPartitions::kRow; // Compute per-lane offset int lane_row_offset = lane_idx / Detail::kAccessWidth; int lane_col_offset = lane_idx % Detail::kAccessWidth; // Compute coordinate in output space int cluster_offset = cluster_idx * Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup; int group_offset = group_idx * Shape::kRow * Count::kRow; int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows; int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess; return MatrixCoord( cluster_offset + group_offset + row_offset + lane_row_offset, column_offset + lane_col_offset * kElementsPerAccess ); } /// Computes the offset of a given vector access CUTLASS_HOST_DEVICE static MatrixCoord iteration_offset(int iter_idx) { return OutputTileThreadMapHelpers<Iterations, Delta>::iteration_offset(iter_idx); } /// Compacted thread map in which the 4D region is contiguous struct CompactedThreadMap { using Shape = Shape_; using TileShape = MatrixShape< Shape::kTile * Shape::kCluster * Shape::kGroup * Shape::kRow, Shape::kColumn >; using Iterations = OutputTileShape< Detail::RowArrangement::kIterationsColumn, Detail::RowArrangement::kIterationsRow, Detail::kIterationsGroup, Detail::kIterationsCluster, 1>; using Delta = OutputTileShape< Detail::RowArrangement::kDeltaColumn, Detail::RowArrangement::kDeltaRow, Detail::kCompactedDeltaGroup, Detail::kCompactedDeltaCluster, 1>; /// Number of elements within each vector access static int const kElementsPerAccess = ElementsPerAccess; /// Number of threads static int const kThreads = Threads; /// Function to compute each thread's initial offset CUTLASS_DEVICE static MatrixCoord initial_offset(int thread_idx) { // int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0); int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster; int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster; int group_idx = residual_cluster / 
Detail::WarpPartitions::kGroup; int residual_group = residual_cluster % Detail::WarpPartitions::kGroup; int row_idx = residual_group / Detail::WarpPartitions::kRow; int col_idx = residual_group % Detail::WarpPartitions::kRow; // Compute per-lane offset int lane_row_offset = lane_idx / Detail::kAccessWidth; int lane_col_offset = lane_idx % Detail::kAccessWidth; // Compute coordinate in output space int cluster_offset = cluster_idx * Shape::kRow * Shape::kGroup; int group_offset = group_idx * Shape::kRow; int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows; int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess; MatrixCoord coord( cluster_offset + group_offset + row_offset + lane_row_offset, column_offset + lane_col_offset * kElementsPerAccess ); return coord; } }; }; //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 3D interleaved layout across warps /// to achieve several performance objectives: /// /// - coalesced memory accesses in units of 64 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template <typename WarpCount_, typename Iterations_, int Threads, int ElementsPerAccess, int ElementSize> struct InterleavedOutputTileThreadMap { using WarpCount = WarpCount_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail {}; // // Output // using Iterations = Iterations_; using Delta = layout::PitchLinearShape<kWarpSize * kElementsPerAccess, 1>; /// Initial offset function CUTLASS_HOST_DEVICE static layout::PitchLinearCoord initial_offset(int thread_idx) { int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location layout::PitchLinearCoord warp_footprint{ Delta::kContiguous * Iterations::kContiguous, Delta::kStrided * Iterations::kStrided}; layout::PitchLinearCoord warp_offset{warp_idx % WarpCount::kContiguous, warp_idx / WarpCount::kContiguous}; // Compute per-lane offset layout::PitchLinearCoord thread_offset_in_warp{ lane_idx * kElementsPerAccess, 0}; layout::PitchLinearCoord thread_offset_in_threadblock_tile = warp_footprint * warp_offset + thread_offset_in_warp; return thread_offset_in_threadblock_tile; } }; //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 4D interleaved layout across warps /// to achieve several performance objectives: /// /// - coalesced memory accesses in units of 64 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template <typename WarpCount_, typename Iterations_, int Threads, int ElementsPerAccess, int ElementSize> struct InterleavedConvOutputTileThreadMap { using WarpCount = WarpCount_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail {}; // // Output // using Iterations = Iterations_; using Delta = MatrixShape<kWarpSize / 4, 4 * kElementsPerAccess>; /// Initial offset function CUTLASS_HOST_DEVICE static MatrixCoord initial_offset(int thread_idx) { int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % 
kWarpSize; // Compute warp location MatrixCoord warp_footprint{ Delta::kRow * Iterations::kRow, Delta::kColumn * Iterations::kColumn, }; MatrixCoord warp_offset{warp_idx % WarpCount::kRow, warp_idx / WarpCount::kRow}; // Compute per-lane offset MatrixCoord thread_offset_in_warp{lane_idx / 4, (lane_idx % 4) * kElementsPerAccess}; MatrixCoord thread_offset_in_threadblock_tile = warp_footprint * warp_offset + thread_offset_in_warp; return thread_offset_in_threadblock_tile; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass
// Source: cutlass/include/cutlass/epilogue/threadblock/output_tile_thread_map.h
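The core of OutputTileThreadMapHelpers is the decomposition of a linear iteration index into (column, row, group, cluster, tile) coordinates and the corresponding matrix offset. The stand-alone sketch below reproduces that arithmetic with concrete made-up iteration counts and deltas so the ordering is easy to trace; the constants are assumptions chosen only for illustration.

#include <cstdio>

// Stand-alone illustration of the iteration_index / iteration_offset arithmetic.
// Iteration counts and deltas are made-up values for demonstration.
struct Iterations { static constexpr int kColumn = 2, kRow = 2, kGroup = 2, kCluster = 1; };
struct Delta      { static constexpr int kColumn = 32, kRow = 8, kGroup = 16, kCluster = 64, kTile = 128; };

void iteration_offset(int iter_idx, int &row_offset, int &column_offset) {
  // Same decomposition order as the CUTLASS helper: column varies fastest,
  // then row, group, cluster, and finally tile.
  int column_idx  = iter_idx % Iterations::kColumn;
  int residual    = iter_idx / Iterations::kColumn;
  int row_idx     = residual % Iterations::kRow;
  residual        = residual / Iterations::kRow;
  int group_idx   = residual % Iterations::kGroup;
  residual        = residual / Iterations::kGroup;
  int cluster_idx = residual % Iterations::kCluster;
  int tile_idx    = residual / Iterations::kCluster;

  row_offset = row_idx * Delta::kRow + group_idx * Delta::kGroup +
               cluster_idx * Delta::kCluster + tile_idx * Delta::kTile;
  column_offset = column_idx * Delta::kColumn;
}

int main() {
  for (int iter = 0; iter < 8; ++iter) {
    int r, c;
    iteration_offset(iter, r, c);
    std::printf("iter %d -> (row %d, col %d)\n", iter, r, c);
  }
  return 0;
}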
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/arch/mma.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/detail/layout.hpp" #include "cutlass/detail/collective.hpp" #include "cutlass/detail/dependent_false.hpp" #include "cute/atom/mma_traits_sm90_gmma.hpp" #include "cute/atom/copy_traits_sm90_tma.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { // // Some named constants // constexpr int tma_alignment_bytes = 16; constexpr int cp_async_min_alignment_bytes = 4; constexpr int sm90_smem_capacity_bytes = 232448; // Maps 2.x A matrix layout tag to respective GMMA major mode enum template <class ElementA, class LayoutA> constexpr cute::GMMA::Major gmma_ss_tag_to_major_A() { // MN major mode is only valid for non-TF32, non-int and non-fp8 MMAs if constexpr (cutlass::gemm::detail::is_mn_major_A<LayoutA>() && not cute::is_same_v<ElementA, tfloat32_t> && sizeof(ElementA) != 1) { return cute::GMMA::Major::MN; } else { return cute::GMMA::Major::K; } } // Maps 2.x B matrix layout tag to respective GMMA major mode enum template <class ElementB, class LayoutB> constexpr cute::GMMA::Major gmma_ss_tag_to_major_B() { // MN major mode is only valid for non-TF32, non-int and non-fp8 MMAs if constexpr (cutlass::gemm::detail::is_mn_major_B<LayoutB>() && not cute::is_same_v<ElementB, tfloat32_t> && sizeof(ElementB) != 1) { return cute::GMMA::Major::MN; } else { return cute::GMMA::Major::K; } } template <class LayoutA> constexpr cute::GMMA::Major gmma_rs_tag_to_major_A() { // MN major mode is only valid for 
non-TF32 and non-int MMAs if constexpr (cutlass::gemm::detail::is_mn_major_A<LayoutA>()) { return cute::GMMA::Major::MN; } else { return cute::GMMA::Major::K; } } template <class LayoutB> constexpr cute::GMMA::Major gmma_rs_tag_to_major_B() { // MN major mode is only valid for non-TF32 and non-int MMAs if constexpr (cutlass::gemm::detail::is_mn_major_B<LayoutB>()) { return cute::GMMA::Major::MN; } else { return cute::GMMA::Major::K; } } // Maps a rank-1 cute::Shape<> representing the cluster shape on to the TMA atom that should be used with it template <class UnimodalClusterShape> constexpr auto sm90_cluster_shape_to_tma_atom(UnimodalClusterShape) { static_assert(cute::rank(UnimodalClusterShape{}) == 1, "Use this function to figure out TMA for each mode individually."); if constexpr (cute::size(UnimodalClusterShape{}) == 1) { return cute::SM90_TMA_LOAD{}; } else { return cute::SM90_TMA_LOAD_MULTICAST{}; } } // Generates the most efficient possible TiledCopy with cp.async copy atom given a set of parameters. template<int ThreadCount, class Element, int Alignment, class StrideType, class TileMN, class TileK> constexpr auto make_cp_async_gmem_tiled_copy() { using namespace cute; using AlignmentType = cute::uint_byte_t<static_cast<int>(sizeof(Element)) * Alignment>; constexpr int TileSizeMN = cute::size(TileMN{}); constexpr int TileSizeK = cute::size(TileK{}); // Maximize the number of threads along the gmem major mode to promote coalesced reads // While making sure our thread layout tiles the threadblock tile evenly if constexpr (cutlass::gemm::detail::is_k_major<StrideType>()) { // K major thread layout for K major gmem constexpr int threads_major = TileSizeK / Alignment; constexpr int threads_minor = ThreadCount / threads_major; static_assert(threads_major > 0); static_assert(ThreadCount % threads_major == 0); static_assert(threads_minor == 0 || (TileSizeMN % threads_minor == 0)); return make_tiled_copy( Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentType>, Element>{}, Layout<Shape <Int<threads_minor>,Int<threads_major>>, Stride<Int<threads_major>, _1>>{}, Layout<Shape<_1,Int<Alignment>>>{}); } else if constexpr (cutlass::gemm::detail::is_mn_major<StrideType>()) { // MN major thread layout for MN major gmem constexpr int threads_major = TileSizeMN / Alignment; constexpr int threads_minor = ThreadCount / threads_major; static_assert(threads_major > 0); static_assert(ThreadCount % threads_major == 0); static_assert(threads_minor == 0 || (TileSizeK % threads_minor == 0)); return make_tiled_copy( Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentType>, Element>{}, Layout<Shape <Int<threads_major>,Int<threads_minor>>, Stride< _1,Int<threads_major>>>{}, Layout<Shape<Int<Alignment>,_1>>{}); } else { static_assert(cute::is_void_v<Element>, "Unsupported gmem layout for automatic gmem tiled copy builder."); } } // Helper for SS GMMA smem selection that considers a tensor TileShape: // (BLK_MN, BLK_K) // or hierarchically // ((BLK_MN0,BLK_MN1,...),(BLK_K0,BLK_K1,...)) // and returns the optimal GMMA::Layout that fits BLK_MN0 and BLK_K0 template <cute::GMMA::Major major, class ElementType, class BLK_MN, class BLK_K, const bool is_ws_transposed_B = false> constexpr auto rs_smem_selector() { using namespace cute; auto BLK_MN0 = size<0>(BLK_MN{}); auto BLK_K0 = size<0>(BLK_K{}); static_assert(BLK_MN0 % 8 == 0, "BLK_MN0 must be a multiple of 8."); static_assert(BLK_K0 % 8 == 0, "BLK_K0 must be a multiple of 8."); if constexpr (major == GMMA::Major::MN) { if constexpr (sizeof(ElementType) == 4){ if constexpr 
(is_ws_transposed_B) { // only optimized transpositionB(SW32 and SW128 for tf32) can be used, but prefer SW32 due to free bank conflict if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW32_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{})"); } } else { // Fall into SW32 due to free bank conflict if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW32_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_INTER_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})"); } } } // Used for int8, fp8, fp16 and bf16 I/O kernels else if constexpr (sizeof(ElementType) == 1 || sizeof(ElementType) == 2) { if constexpr (sizeof(ElementType) == 1 && is_ws_transposed_B) { // Only optimized transpositionB (SW32 for int8 and fp8) can be used if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW128_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_128_Atom<ElementType>{})"); } } else { if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW128_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW64_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW64_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW32_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_INTER_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})"); } } } else { static_assert(cutlass::detail::dependent_false<ElementType>, "Smem selector does not support this element type"); } } else if constexpr (major == GMMA::Major::K) { if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW128_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW128_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW64_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW64_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW32_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_INTER_Atom<ElementType>{}; } else { static_assert(BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0, "BLK_K0 must be a multiple of size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{})"); } } } // Helper for SS GMMA smem selection that considers a tensor TileShape: // (BLK_MN, BLK_K) // or hierarchically // ((BLK_MN0,BLK_MN1,...),(BLK_K0,BLK_K1,...)) // and returns the largest GMMA::Layout that fits BLK_MN0 and BLK_K0 template <cute::GMMA::Major major, class ElementType, class BLK_MN, class BLK_K> CUTE_HOST_DEVICE constexpr auto ss_smem_selector() { using namespace cute; auto BLK_MN0 = size<0>(BLK_MN{}); 
  auto BLK_K0 = size<0>(BLK_K{});

  static_assert(BLK_MN0 % 8 == 0, "BLK_MN0 must be a multiple of 8.");
  static_assert(BLK_K0 % 8 == 0, "BLK_K0 must be a multiple of 8.");

  if constexpr (major == GMMA::Major::MN) {
    if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) {
      return GMMA::Layout_MN_SW128_Atom<ElementType>{};
    }
    else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW64_Atom<ElementType>{}) == 0) {
      return GMMA::Layout_MN_SW64_Atom<ElementType>{};
    }
    else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) {
      return GMMA::Layout_MN_SW32_Atom<ElementType>{};
    }
    else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) {
      return GMMA::Layout_MN_INTER_Atom<ElementType>{};
    }
    else {
      static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0,
                    "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})");
    }
  }
  else if constexpr (major == GMMA::Major::K) {
    if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW128_Atom<ElementType>{}) == 0) {
      return GMMA::Layout_K_SW128_Atom<ElementType>{};
    }
    else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW64_Atom<ElementType>{}) == 0) {
      return GMMA::Layout_K_SW64_Atom<ElementType>{};
    }
    else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW32_Atom<ElementType>{}) == 0) {
      return GMMA::Layout_K_SW32_Atom<ElementType>{};
    }
    else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0) {
      return GMMA::Layout_K_INTER_Atom<ElementType>{};
    }
    else {
      static_assert(BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0,
                    "BLK_K0 must be a multiple of size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{})");
    }
  }
}

template <class ElementA, class ElementB>
constexpr bool
is_input_size_two_bytes() {
  return (sizeof(ElementA) == 2 && sizeof(ElementB) == 2);
}

template <class ElementA, class ElementB>
constexpr bool
is_input_fp8() {
  return ((cute::is_same_v<ElementA, float_e4m3_t> || cute::is_same_v<ElementA, float_e5m2_t>) &&
          (cute::is_same_v<ElementB, float_e4m3_t> || cute::is_same_v<ElementB, float_e5m2_t>));
}

// We need to handle the tuples in this function since it is used in SFINAE dispatch in the CollectiveBuilder.
// At that point, it is not guaranteed that the tuples have been split out into the required parts.
template <class MaybeTupleElementA, class LayoutA, class MaybeTupleElementB, class LayoutB>
constexpr bool
is_use_rmem_A() {
  using ElementA = detail::deduce_mixed_width_dtype_t<0, MaybeTupleElementA>;
  using ElementB = detail::deduce_mixed_width_dtype_t<0, MaybeTupleElementB>;

  constexpr bool IsABDifferentWidth = cute::sizeof_bits_v<ElementA> != cute::sizeof_bits_v<ElementB>;
  constexpr bool HasScales = cute::is_tuple<MaybeTupleElementA>::value ^ cute::is_tuple<MaybeTupleElementB>::value;
  constexpr bool IsInputSizeTwoBytes = is_input_size_two_bytes<ElementA, ElementB>();
  constexpr bool IsLayoutAkBk = cutlass::gemm::detail::is_k_major_A<LayoutA>() &&
                                cutlass::gemm::detail::is_k_major_B<LayoutB>();
  constexpr bool IsUseRmemA = (!IsInputSizeTwoBytes && !IsLayoutAkBk) || IsABDifferentWidth || HasScales;
  return IsUseRmemA;
}

template <class ElementA, int AlignmentA, class ElementB, int AlignmentB, int RequiredAlignment>
constexpr bool
is_aligned() {
  return ((sizeof(ElementA) * AlignmentA) % RequiredAlignment == 0) &&
         ((sizeof(ElementB) * AlignmentB) % RequiredAlignment == 0);
}

} // namespace detail

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace cutlass::gemm::collective
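// --- Illustrative usage sketch (editor's addition, not part of the original header) ---
// A minimal example of how a collective builder might invoke ss_smem_selector to pick the
// largest GMMA shared-memory swizzle atom for an operand, then tile it over the full staged
// tile. The element type (half_t), K-major layout, 128x64 tile, and 4-stage pipeline below
// are assumptions chosen only for illustration.

#include "cutlass/gemm/collective/collective_builder.hpp"

// Pick the widest swizzle atom whose K-extent divides BLK_K = 64 for a K-major half_t operand.
using SmemLayoutAtomA = decltype(cutlass::gemm::collective::detail::ss_smem_selector<
    cute::GMMA::Major::K, cutlass::half_t, cute::Int<128>, cute::Int<64>>());

// Tile the atom to cover the whole (BLK_M, BLK_K, PIPE) shared-memory shape.
using SmemLayoutA = decltype(cute::tile_to_shape(
    SmemLayoutAtomA{},
    cute::make_shape(cute::Int<128>{}, cute::Int<64>{}, cute::Int<4>{})));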
cutlass/include/cutlass/gemm/collective/builders/sm90_common.inl/0
{ "file_path": "cutlass/include/cutlass/gemm/collective/builders/sm90_common.inl", "repo_id": "cutlass", "token_count": 6169 }
28
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Definitions for GEMM structures */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/mma.h" #include "cutlass/arch/wmma.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { //////////////////////////////////////////////////////////////////////////////// template < typename OperatorClass, typename ArchTag, typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator > struct DefaultGemmConfiguration; //////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassSimt, ArchTag, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<128, 128, 8>; using WarpShape = GemmShape<32, 64, 8>; using InstructionShape = GemmShape<1, 1, 1>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 1, ElementAccumulator, ElementAccumulator >; using Operator = arch::OpMultiplyAdd; }; //////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename ElementC> struct DefaultGemmConfiguration<arch::OpClassSimt, ArchTag, int8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 4; static int const kAlignmentB = 4; using ThreadblockShape = GemmShape<128, 128, 32>; using WarpShape = GemmShape<32, 64, 32>; using InstructionShape = GemmShape<1, 1, 4>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 1, int32_t, float >; using Operator = arch::OpMultiplyAdd; }; //////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassWmmaTensorOp, ArchTag, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator >; using Operator = arch::OpMultiplyAdd; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm70, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value; using ThreadblockShape = GemmShape<128, 256, 32>; using WarpShape = GemmShape<64, 64, 32>; using InstructionShape = GemmShape<8, 8, 4>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator >; using Operator = arch::OpMultiplyAdd; }; 
//////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementA>::value; using ThreadblockShape = GemmShape<128, 256, 32>; using WarpShape = GemmShape<64, 64, 32>; using InstructionShape = GemmShape<16, 8, 8>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator >; using Operator = typename platform::conditional< (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value || platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value), arch::OpMultiplyAddSaturate, arch::OpMultiplyAdd>::type; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, int8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<8, 8, 16>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, int8_t, uint8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<8, 8, 16>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<8, 8, 16>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint8_t, uint8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = 
GemmShape<64, 64, 64>; using InstructionShape = GemmShape<8, 8, 16>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, int4b_t, int4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<8, 8, 32>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, int4b_t, uint4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<8, 8, 32>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint4b_t, int4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<8, 8, 32>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint4b_t, uint4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<8, 8, 32>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint1b_t, uint1b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint1b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint1b_t>::value; using ThreadblockShape = GemmShape<128, 256, 512>; using WarpShape = GemmShape<64, 64, 512>; using InstructionShape = 
GemmShape<8, 8, 128>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpXorPopc; }; //////////////////////////////////////////////////////////////////////////////// template <typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm80, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementA>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 16>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator>; using Operator = typename platform::conditional< (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value || platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value), arch::OpMultiplyAddSaturate, arch::OpMultiplyAdd>::type; }; //////////////////////////////////////////////////////////////////////////////// template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm80, double, double, ElementC, ElementAccumulator> { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<128, 128, 16>; using WarpShape = GemmShape<32, 64, 16>; using InstructionShape = GemmShape<8, 8, 4>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 1, ElementAccumulator, ElementAccumulator>; using Operator = arch::OpMultiplyAdd; }; template <> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, complex<double>, complex<double>, complex<double>, complex<double> > { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<64, 64, 16>; using WarpShape = GemmShape<32, 32, 16>; using InstructionShape = GemmShape<8, 8, 4>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< complex<double>, 1, complex<double>, complex<double>>; using Operator = arch::OpMultiplyAddComplex; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, int8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, int8_t, uint8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using 
WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint8_t, uint8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, int4b_t, int4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<16, 8, 64>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, int4b_t, uint4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<16, 8, 64>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint4b_t, int4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = 
GemmShape<16, 8, 64>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint4b_t, uint4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<16, 8, 64>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint1b_t, uint1b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint1b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint1b_t>::value; using ThreadblockShape = GemmShape<128, 256, 512>; using WarpShape = GemmShape<64, 64, 512>; using InstructionShape = GemmShape<16, 8, 256>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAdd; }; //////////////////////////////////////////////////////////////////////////////// /// Base configuration for all {fe4m3, fe5m2} x {fe4m3, fe5m2} combinations on SM89 template < typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfigurationSm89F8 { static_assert((platform::is_same<ElementA, cutlass::float_e4m3_t>::value || platform::is_same<ElementA, cutlass::float_e5m2_t>::value), "ElementA must be of type float_e4m3_t or float_e5m2_t"); static_assert((platform::is_same<ElementB, cutlass::float_e4m3_t>::value || platform::is_same<ElementB, cutlass::float_e5m2_t>::value), "ElementB must be of type float_e4m3_t or float_e5m2_t"); static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator>; using Operator = arch::OpMultiplyAdd; }; /// Partial specialization for SM89 fe4m3 x fe4m3 template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm89, cutlass::float_e4m3_t, cutlass::float_e4m3_t, ElementC, ElementAccumulator> : DefaultGemmConfigurationSm89F8< cutlass::float_e4m3_t, cutlass::float_e4m3_t, ElementC, ElementAccumulator> {}; /// Partial specialization for SM89 fe4m3 x fe5m2 template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm89, cutlass::float_e4m3_t, cutlass::float_e5m2_t, ElementC, ElementAccumulator> : DefaultGemmConfigurationSm89F8< cutlass::float_e4m3_t, cutlass::float_e5m2_t, ElementC, ElementAccumulator> {}; /// 
Partial specialization for SM89 fe5m2 x fe4m3 template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm89, cutlass::float_e5m2_t, cutlass::float_e4m3_t, ElementC, ElementAccumulator> : DefaultGemmConfigurationSm89F8< cutlass::float_e5m2_t, cutlass::float_e4m3_t, ElementC, ElementAccumulator> {}; /// Partial specialization for SM89 fe5m2 x fe5m2 template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm89, cutlass::float_e5m2_t, cutlass::float_e5m2_t, ElementC, ElementAccumulator> : DefaultGemmConfigurationSm89F8< cutlass::float_e5m2_t, cutlass::float_e5m2_t, ElementC, ElementAccumulator> {}; //////////////////////////////////////////////////////////////////////////////// template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm90, double, double, ElementC, ElementAccumulator> { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 4>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 1, ElementAccumulator, ElementAccumulator>; using Operator = arch::OpMultiplyAdd; }; template <> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm90, complex<double>, complex<double>, complex<double>, complex<double> > { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<64, 64, 16>; using WarpShape = GemmShape<32, 32, 16>; using InstructionShape = GemmShape<16, 8, 4>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< complex<double>, 1, complex<double>, complex<double>>; using Operator = arch::OpMultiplyAddComplex; }; } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
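// --- Illustrative usage sketch (editor's addition, not part of the original header) ---
// DefaultGemmConfiguration is consumed as a trait bundle when composing device-level GEMMs:
// the kernel wrappers read its tile shapes, stage count, alignments, and epilogue defaults.
// The query below uses the SM80 tensor-op specialization defined above; the half_t/float
// element choices are assumptions for illustration.

#include "cutlass/gemm/device/default_gemm_configuration.h"

using Sm80F16Config = cutlass::gemm::device::DefaultGemmConfiguration<
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
    cutlass::half_t, cutlass::half_t, cutlass::half_t, float>;

// Defaults pulled from the specialization above: a 128x256x64 threadblock tile,
// a 3-stage mainloop, and 8-element (128-bit) accesses for half_t operands.
static_assert(Sm80F16Config::kStages == 3, "SM80 tensor-op default uses a 3-stage mainloop");
static_assert(Sm80F16Config::kAlignmentA == 8, "128-bit accesses for half_t operands");
using Sm80F16TbShape  = Sm80F16Config::ThreadblockShape;  // GemmShape<128, 256, 64>
using Sm80F16Epilogue = Sm80F16Config::EpilogueOutputOp;  // LinearCombination<half_t, 8, float, float>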
cutlass/include/cutlass/gemm/device/default_gemm_configuration.h/0
{ "file_path": "cutlass/include/cutlass/gemm/device/default_gemm_configuration.h", "repo_id": "cutlass", "token_count": 9752 }
29
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a GEMM kernel that computes the absolute maximum of the output tensor and applies additional scaling factors to operands. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/gemm_universal.h" #include "cutlass/gemm/kernel/default_gemm_universal.h" #include "cutlass/gemm/kernel/default_gemm_with_absmax.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/gemm/device/gemm_universal_base.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// // Universal GEMM with absolute-maximum calculation and scaling template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassTensorOp, /// Tag indicating architecture to tune for. This is the minimum SM that /// supports the intended feature. The device kernel can be built /// targeting any SM larger than this number. 
typename ArchTag_ = arch::Sm89, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp_ = cutlass::epilogue::thread::LinearCombinationBiasElementwise< ElementC_, ElementAccumulator_, ElementAccumulator_, ElementC_, ElementC_, 128 / cutlass::sizeof_bits<ElementC_>::value>, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator, /// Complex elementwise transformation on A operand ComplexTransform TransformA = ComplexTransform::kNone, /// Complex elementwise transformation on B operand ComplexTransform TransformB = ComplexTransform::kNone > class GemmUniversalWithAbsMax; // Partial specialization for SM89 template < typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename LayoutC_, typename ElementAccumulator_, typename ThreadblockShape_, typename WarpShape_, typename InstructionShape_, typename EpilogueOutputOp_, typename ThreadblockSwizzle_, int Stages, int AlignmentA, int AlignmentB, typename Operator_, ComplexTransform TransformA, ComplexTransform TransformB > class GemmUniversalWithAbsMax< ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementAccumulator_, arch::OpClassTensorOp, arch::Sm89, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, Operator_, TransformA, TransformB > : public GemmUniversalBase< typename kernel::DefaultGemmWithAbsMax< ElementA_, LayoutA_, TransformA, AlignmentA, ElementB_, LayoutB_, TransformB, AlignmentB, ElementC_, LayoutC_, ElementAccumulator_, arch::OpClassTensorOp, arch::Sm89, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, Operator_ >::GemmKernel > { public: using ElementAccumulator = ElementAccumulator_; using OperatorClass = arch::OpClassTensorOp; using ArchTag = arch::Sm89; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static int const kStages = Stages; static int const 
kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static int const kAlignmentC = EpilogueOutputOp::kCount; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Base = GemmUniversalBase< typename kernel::DefaultGemmWithAbsMax< ElementA_, LayoutA_, TransformA, AlignmentA, ElementB_, LayoutB_, TransformB, AlignmentB, ElementC_, LayoutC_, ElementAccumulator_, OperatorClass, ArchTag, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, Operator_ >::GemmKernel >; using Arguments = typename Base::Arguments; using GemmKernel = typename Base::GemmKernel; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for SM89 column-major output exchanges problem size and operand. template < typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename ElementAccumulator_, typename ThreadblockShape_, typename WarpShape_, typename InstructionShape_, typename EpilogueOutputOp_, typename ThreadblockSwizzle_, int Stages, int AlignmentA, int AlignmentB, typename Operator_, ComplexTransform TransformA, ComplexTransform TransformB> class GemmUniversalWithAbsMax<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, layout::ColumnMajor, // partially specialized on LayoutC ElementAccumulator_, arch::OpClassTensorOp, arch::Sm89, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, Operator_, TransformA, TransformB> { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = layout::ColumnMajor; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = arch::OpClassTensorOp; using ArchTag = arch::Sm89; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using UnderlyingOperator = typename GemmUniversalWithAbsMax< ElementB, typename layout::LayoutTranspose<LayoutB>::type, ElementA, typename layout::LayoutTranspose<LayoutA>::type, ElementC, layout::RowMajor, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, kAlignmentB, kAlignmentA, Operator, kTransformB, kTransformA >::Base; using GemmKernel = typename UnderlyingOperator::GemmKernel; static int const kAlignmentC = EpilogueOutputOp::kCount; /// Argument structure using Arguments = typename UnderlyingOperator::Arguments; private: UnderlyingOperator underlying_operator_; public: /// Constructs the GEMM. 
  GemmUniversalWithAbsMax() { }

  /// Helper to construct a transposed equivalent for the underlying GEMM operator
  static Arguments to_underlying_arguments(Arguments const &args) {
    return args.transposed_problem();
  }

  /// Determines whether the GEMM can execute the given problem.
  static Status can_implement(Arguments const &args) {
    return UnderlyingOperator::can_implement(to_underlying_arguments(args));
  }

  /// Gets the workspace size
  static size_t get_workspace_size(Arguments const &args) {
    return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
  }

  /// Computes the grid shape
  static dim3 get_grid_shape(Arguments const &args) {
    return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
  }

  /// Computes the maximum number of active blocks per multiprocessor
  static int maximum_active_blocks(int smem_capacity = -1) {
    return UnderlyingOperator::maximum_active_blocks(smem_capacity);
  }

  /// Initializes GEMM state from arguments.
  Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
    return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
  }

  /// Lightweight update given a subset of arguments
  Status update(Arguments const &args, void *workspace = nullptr) {
    return underlying_operator_.update(to_underlying_arguments(args), workspace);
  }

  /// Runs the kernel using initialized state.
  Status run(cudaStream_t stream = nullptr) {
    return underlying_operator_.run(stream);
  }

  /// Runs the kernel using initialized state.
  Status operator()(cudaStream_t stream = nullptr) {
    return run(stream);
  }

  /// Runs the kernel using initialized state.
  Status operator()(
    Arguments const &args,
    void *workspace = nullptr,
    cudaStream_t stream = nullptr) {

    Status status = initialize(args, workspace, stream);

    if (status == Status::kSuccess) {
      status = run(stream);
    }

    return status;
  }
};

////////////////////////////////////////////////////////////////////////////////

} // namespace device
} // namespace gemm
} // namespace cutlass

////////////////////////////////////////////////////////////////////////////////
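// --- Illustrative usage sketch (editor's addition, not part of the original header) ---
// Host-side call pattern shared by both specializations above: the column-major variant
// forwards every call to its transposed row-major UnderlyingOperator, so callers follow the
// same can_implement / get_workspace_size / initialize / run sequence either way. The helper
// below is hypothetical; "Gemm" stands for any concrete GemmUniversalWithAbsMax instantiation
// (for example, one of the SM89 FP8 configurations), and the caller is assumed to have
// allocated at least Gemm::get_workspace_size(args) bytes of device workspace.

#include "cutlass/gemm/device/gemm_universal_with_absmax.h"

template <class Gemm>
cutlass::Status run_gemm_with_absmax(
    typename Gemm::Arguments const &args,
    void *workspace,
    cudaStream_t stream = nullptr) {

  Gemm gemm_op;

  // Reject problems that violate the kernel's alignment or layout requirements.
  cutlass::Status status = Gemm::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // Initialize internal state (the column-major specialization transposes the arguments here).
  status = gemm_op.initialize(args, workspace, stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // Launch the kernel on the given stream.
  return gemm_op.run(stream);
}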
cutlass/include/cutlass/gemm/device/gemm_universal_with_absmax.h/0
{ "file_path": "cutlass/include/cutlass/gemm/device/gemm_universal_with_absmax.h", "repo_id": "cutlass", "token_count": 4561 }
30
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/blas3.h" #include "cutlass/layout/matrix.h" #include "cutlass/arch/wmma.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/rank_2k_universal.h" #include "cutlass/gemm/threadblock/default_mma_core_sm75.h" #include "cutlass/gemm/threadblock/default_mma_core_sm70.h" #include "cutlass/gemm/threadblock/default_mma_core_sm80.h" #include "cutlass/gemm/threadblock/default_mma.h" #include "cutlass/gemm/threadblock/default_mma_core_simt.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" #endif //CUTLASS_ARCH_WMMA_ENABLED //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { //////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC_, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Blas3 computation mode BlasMode BlasMode_ = BlasMode::kSymmetric> struct DefaultRank2K; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Hopper Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, 
/// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator> struct DefaultRank2K< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator> { /// Define the threadblock-scoped matrix multiply-accumulate (A x BT) using Mma1 = typename cutlass::gemm::threadblock::DefaultMma< ElementA, LayoutA, kAlignmentA, ElementB, typename layout::LayoutTranspose<LayoutB>::type, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; /// Define the threadblock-scoped matrix multiply-accumulate (B x AT) using Mma2 = typename cutlass::gemm::threadblock::DefaultMma< ElementB, LayoutB, kAlignmentB, ElementA, typename layout::LayoutTranspose<LayoutA>::type, kAlignmentA, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3< ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue; /// Define the kernel-level Rank2K operator. 
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Ampere Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator> struct DefaultRank2K< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator> { /// Define the threadblock-scoped matrix multiply-accumulate (A x BT) using Mma1 = typename cutlass::gemm::threadblock::DefaultMma< ElementA, LayoutA, kAlignmentA, ElementB, typename layout::LayoutTranspose<LayoutB>::type, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; /// Define the threadblock-scoped matrix multiply-accumulate (B x AT) using Mma2 = typename cutlass::gemm::threadblock::DefaultMma< ElementB, LayoutB, kAlignmentB, ElementA, typename layout::LayoutTranspose<LayoutA>::type, kAlignmentA, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3< ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue; /// Define the kernel-level Rank2K operator. using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
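// --- Illustrative usage sketch (editor's addition, not part of the original header) ---
// DefaultRank2K is a kernel-level factory: given operand, layout, and tile choices it exposes
// a Rank2Kkernel typedef that device-level wrappers launch. The instantiation below targets
// the SM80 row-major-C specialization defined above; the FP16 element types, tile shapes,
// epilogue, and stage count are assumptions mirroring common SM80 configurations, not values
// taken from this header.

#include "cutlass/gemm/kernel/default_rank_2k.h"

using DefaultSyr2k = cutlass::gemm::kernel::DefaultRank2K<
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,    // A, layout, alignment
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,    // B, layout, alignment
    float, cutlass::layout::RowMajor,                    // C/D and layout
    cutlass::FillMode::kLower,                           // update only the lower triangle of C
    float,                                               // accumulator
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,              // threadblock tile
    cutlass::gemm::GemmShape<64, 64, 32>,                // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,                 // tensor core instruction
    cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                                   // stages
    false,                                               // SplitKSerial
    cutlass::arch::OpMultiplyAdd>;

// The composed kernel (two staged MMAs plus a BLAS3 epilogue) is then launched by a
// device-side wrapper such as cutlass::gemm::device::Rank2K.
using Syr2kKernel = DefaultSyr2k::Rank2Kkernel;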
cutlass/include/cutlass/gemm/kernel/default_rank_2k.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/default_rank_2k.h", "repo_id": "cutlass", "token_count": 3908 }
31
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Gemm kernel with an epilogue defined under the epilogue visitor concept with streamk. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/barrier.h" #include "cutlass/block_striped.h" #include "cutlass/trace.h" #include "cutlass/gemm/kernel/gemm_universal_streamk.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock mapping function > class GemmWithEpilogueVisitorStreamk { public: using Base = GemmUniversalStreamk<Mma_, Epilogue_, ThreadblockSwizzle_>; // // Types and constants // using Mma = Mma_; using Epilogue = Epilogue_; using FusionCallbacks = typename Epilogue::FusionCallbacks; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; /// The per-thread tile of raw accumulators using AccumulatorTile = typename Mma::FragmentC; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Workspace bytes per thread block static size_t const kWorkspaceBytesPerBlock = __NV_STD_MAX( kThreadCount * sizeof(AccumulatorTile), Epilogue::kWorkspaceBytesPerBlock); /// Block-striped reduction utility using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>; // // Structures // using Arguments = typename Base::Arguments; /// Parameters structure struct Params { public: // // Data members // cute::Shape<int32_t,int32_t,int32_t> problem_shape{}; void * ptr_A{nullptr}; void * ptr_B{nullptr}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorB::Params params_B{}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; GemmUniversalMode mode{GemmUniversalMode::kGemm}; ThreadblockSwizzle block_mapping{}; void *barrier_workspace{nullptr}; void *partials_workspace{nullptr}; typename FusionCallbacks::Params output_op{}; void * ptr_D{nullptr}; void * ptr_C{nullptr}; typename Epilogue::OutputTileIterator::Params params_D{}; typename Epilogue::OutputTileIterator::Params params_C{}; int64_t batch_stride_D{0}; int64_t batch_stride_C{0}; protected: // // Host-only dispatch-utilities // /// Pad the given allocation size up to the nearest cache line static size_t cacheline_align_up(size_t size) { static const int CACHELINE_SIZE = 128; return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; } /// Get the workspace size needed for barrier size_t get_barrier_workspace_size() const { // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, // each reduction block needs its own synchronization flag. 
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); } /// Get the workspace size needed for intermediate partial sums size_t get_partials_workspace_size() const { int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); } public: // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}), params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a), params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b), params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c), params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d), output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)), mode(args.mode), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(args.ptr_D), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C(args.batch_stride_C), batch_stride_D(args.batch_stride_D), barrier_workspace(nullptr), partials_workspace(nullptr) { // Number of SMs to make available for StreamK decomposition int avail_sms = (args.avail_sms == -1) ? device_sms : fast_min(args.avail_sms, device_sms); // Initialize the block mapping structure block_mapping = ThreadblockSwizzle( args.mode, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.batch_count, sm_occupancy, device_sms, avail_sms, sizeof(ElementA), sizeof(ElementB), sizeof(ElementC), Epilogue::kAccumulatorFragments); } /// Returns the workspace size (in bytes) needed for these parameters size_t get_workspace_size() const { return get_barrier_workspace_size() + get_partials_workspace_size(); } /// Assign and initialize the specified workspace buffer. Assumes /// the memory allocated to workspace is at least as large as get_workspace_size(). 
Status init_workspace( void *workspace, cudaStream_t stream = nullptr) { uint8_t *ptr = static_cast<uint8_t*>(workspace); // Establish partials workspace partials_workspace = nullptr; size_t partials_workspace_bytes = get_partials_workspace_size(); if (partials_workspace_bytes > 0) { if (!workspace) { return Status::kErrorWorkspaceNull; } partials_workspace = ptr; ptr += partials_workspace_bytes; } // Establish barrier workspace barrier_workspace = nullptr; size_t barrier_workspace_bytes = get_barrier_workspace_size(); if (barrier_workspace_bytes > 0) { if (!workspace) { return Status::kErrorWorkspaceNull; } barrier_workspace = ptr; ptr += barrier_workspace_bytes; } // Zero-initialize barrier workspace if (barrier_workspace) { size_t barrier_workspace_bytes = get_barrier_workspace_size(); CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); cudaError_t result = cudaMemsetAsync( barrier_workspace, 0, barrier_workspace_bytes, stream); if (result != cudaSuccess) { CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); return Status::kErrorInternal; } } return Status::kSuccess; } /// Returns the GEMM volume in thread block tiles cutlass::gemm::GemmCoord get_tiled_shape() const { return block_mapping.tiled_shape(); } /// Returns the total number of thread blocks to launch int get_grid_blocks() const { dim3 grid_dims = get_grid_dims(); return grid_dims.x * grid_dims.y * grid_dims.z; } /// Returns the grid extents in thread blocks to launch dim3 get_grid_dims() const { return block_mapping.get_grid_dims(); } /// Lightweight update given a subset of arguments. void update(Arguments const &args) { CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()"); // Update input/output pointers ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_C = args.batch_stride_C; batch_stride_D = args.batch_stride_D; output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/); problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count); } }; struct TileWorkDesc: Base::TileWorkDesc { int k_end; CUTLASS_DEVICE bool tile_finished(Params const &params) { return (k_end == params.block_mapping.problem_size.k()); } }; // using TileWorkDesc = typename Base::TileWorkDesc; using SharedStorage = typename Base::SharedStorage; protected: // // Data members // /// GEMM problem parameters Params params; /// Shared storage reference SharedStorage &shared_storage; /// ID within the threadblock int thread_idx; /// ID of warp int warp_idx; /// ID of each thread within a warp int lane_idx; /// Threadblock scoped epilogue Epilogue epilogue; public: // // Host-only dispatch API // /// Determines whether the GEMM problem size satisfies this kernel's /// alignment requirements static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { return Base::can_implement(problem_size); } /// Determines whether the GEMM problem satisfies this kernel's /// alignment requirements static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } protected: // // Device-only utility methods // /// Iterator for fetching tile fragments from A CUTLASS_DEVICE typename Mma::IteratorA init_iterator_A( TileWorkDesc &tile_work, GemmUniversalMode mode) { // The input A matrix ElementA *ptr_A = 
static_cast<ElementA *>(params.ptr_A); // Update input pointers based on batched/array mode if (mode == GemmUniversalMode::kBatched) { ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; } if (mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()]; } int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; int m_end = params.block_mapping.problem_size.m(); return Mma::IteratorA( params.params_A, ptr_A, { m_end, tile_work.k_end }, threadIdx.x, { m_begin, tile_work.k_begin }); } /// Iterator for fetching tile fragments from B CUTLASS_DEVICE typename Mma::IteratorB init_iterator_B( TileWorkDesc &tile_work, GemmUniversalMode mode) { // The input B matrix ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // Update input pointers based on batched/array mode if (mode == GemmUniversalMode::kBatched) { ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; } if (mode == GemmUniversalMode::kArray) { ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()]; } int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; int n_end = params.block_mapping.problem_size.n(); return Mma::IteratorB( params.params_B, ptr_B, { tile_work.k_end, n_end }, threadIdx.x, { tile_work.k_begin, n_begin }); } CUTLASS_DEVICE void init_dp_tile_work( TileWorkDesc &tile_work, int tile_idx) { // The linear tile index tile_work.tile_idx = tile_idx; // The first global-scoped MAC-iteration this threadblock will perform for this tile tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); // The number of MAC-iterations this threadblock will perform for this tile tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_begin = 0; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_end = params.block_mapping.problem_size.k(); // The location of this tile (in threadblock-tile coordinates) in the output matrix tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); } CUTLASS_DEVICE void init_sk_tile_work( TileWorkDesc &tile_work, int tile_idx, int block_iter_begin, int block_iter_end) { // The linear tile index tile_work.tile_idx = tile_idx; // The first global-scoped MAC-iteration for this tile int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); // The first global-scoped MAC-iteration this threadblock will perform for this tile tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); // The first tile-scoped MAC-iteration this threadblock will perform for this tile int k_iter_begin = tile_work.iter_begin - tile_iter_begin; // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile int k_iter_end = block_iter_end - tile_iter_begin; // The number of MAC-iterations this threadblock will perform for this tile tile_work.k_iters_remaining = k_iter_end - k_iter_begin; // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_begin = k_iter_begin * Mma::Shape::kK; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_end = min( params.block_mapping.problem_size.k(), // extent of k domain (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment // The location of this tile (in 
threadblock-tile coordinates) in the output matrix tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); } /// Share accumulators with peers CUTLASS_DEVICE void share_accumulators( AccumulatorTile const &accumulator_tile, int block_idx, int first_block_idx) { AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace); int accum_tile_offset = first_block_idx * kThreadCount; if (block_idx == first_block_idx) { // First peer initializes the workspace partials BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); } else { // Subsequent peers atomically accumulate into the workspace partials if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) { // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); } else { // Turnstile reduction order: wait until the previous peer has written int wait_count = block_idx - first_block_idx; Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); } // Perform reduction in workspace BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); } // Signal our arrival Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); } /// Acquire accumulators from peers CUTLASS_DEVICE void acquire_accumulators( AccumulatorTile &accumulator_tile, int block_idx, int first_block_idx) { AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace); // Wait for arrival int num_carry_in = block_idx - first_block_idx; Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); // Load and add peer-partials accumulator tile to local accumulator tile int accum_tile_offset = first_block_idx * kThreadCount; BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); } /// Perform epilogue computations and output CUTLASS_DEVICE void do_epilogue( TileWorkDesc &tile_work, AccumulatorTile &accumulator_tile) { cutlass::gemm::GemmCoord threadblock_tile_offset{ tile_work.tiled_coord.m(), tile_work.tiled_coord.n(), tile_work.tiled_coord.k() }; // Execute the epilogue operator to update the destination tensor. 
epilogue( accumulator_tile, threadblock_tile_offset, params.problem_shape, thread_idx); } CUTLASS_DEVICE void separate_reduction(int reduce_idx) { int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; // Reduce by sk-tile (every tile contributed to by one or more blocks) reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); // Wait for peers to complete int peer_idx_end = peer_idx_last + 1; int num_peers = peer_idx_end - peer_idx_begin; Barrier::wait_eq_reset( params.barrier_workspace, thread_idx, (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, num_peers); /// The location of this tile (in threadblock-tile coordinates) in the output matrix GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); // Execute the epilogue operator to update the destination tensor. epilogue.reduce( peer_idx_begin, peer_idx_end, reduce_fragment_idx, params.partials_workspace, tiled_coord, params.problem_shape, thread_idx); } CUTLASS_DEVICE void process_tile( TileWorkDesc tile_work, int block_idx, int dp_start_block_idx, int block_iter_begin) { // Initialize input iterators typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); // Initialize accumulators AccumulatorTile accumulator_tile; accumulator_tile.clear(); // Initialize MMA abstraction Mma mma( shared_storage.main_loop, thread_idx, warp_idx, lane_idx); // Perform this tile's range of multiply-accumulate (MAC) iterations mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || (params.block_mapping.reduction_blocks == 0) || (block_idx >= dp_start_block_idx)) { // // Cooperative SK peer reduction or DP block // int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); if (!tile_work.tile_finished(params)) { // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace share_accumulators(accumulator_tile, block_idx, first_block_idx); } else { // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile if (!tile_work.tile_started()) { // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks acquire_accumulators(accumulator_tile, block_idx, first_block_idx); } do_epilogue(tile_work, accumulator_tile); } } else { // // Separate peer reduction // // Share accumulator partial sums with peer threadblock(s) through scratch workspace epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); // Signal arrival Barrier::arrive_range_inc( params.barrier_workspace, thread_idx, tile_work.tile_idx * Epilogue::kAccumulatorFragments, Epilogue::kAccumulatorFragments); } } /// Executes one GEMM CUTLASS_DEVICE void gemm() { // Initialize block's iteration range int tile_idx = 0; int block_iter_begin = 0; int block_iters_remaining = 0; int block_idx = params.block_mapping.get_block_idx(); int 
sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; // Initialize tile work descriptor TileWorkDesc tile_work; bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); bool sk_block = (block_idx < sk_padding_start_block_idx); bool reduce_block = (block_idx >= reduce_start_block_idx) && (block_idx < grid_padding_start_block_idx) && (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); if (dp_block) { // This is a DP block int dp_block_idx = block_idx - dp_start_block_idx; int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles; // Blocks in first DP wave get configured number of tiles tile_idx = first_dp_tile + dp_block_idx; int tile_allottment = params.block_mapping.dp_first_wave_tiles; // Blocks in subsequent DP waves get 1 tile if (dp_block_idx >= params.block_mapping.avail_sms) { tile_allottment = 1; tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; } block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; init_dp_tile_work(tile_work, tile_idx); // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) if ((tile_idx < params.block_mapping.sk_tiles) || (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) { return; } } else if (sk_block) { // This is a SK block int block_iter_end; params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); block_iters_remaining = block_iter_end - block_iter_begin; tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); } else { if (reduce_block) { // This is a reduction threadblock int reduce_block_idx = block_idx - reduce_start_block_idx; separate_reduction(reduce_block_idx); } return; } // Iteration-processing loop body CUTLASS_PRAGMA_NO_UNROLL while (true) { // Perform this block's share of work for this tile process_tile( tile_work, block_idx, dp_start_block_idx, block_iter_begin); block_iters_remaining -= tile_work.k_iters_remaining; if (block_iters_remaining == 0) { break; } // Continue to next tile __syncthreads(); if (block_idx >= dp_start_block_idx) { // DP block consume their tiles at stride tile_idx += params.block_mapping.avail_sms; init_dp_tile_work(tile_work, tile_idx); } else { // SK blocks consume their tiles in backwards order tile_idx--; init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); } } } public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmWithEpilogueVisitorStreamk op(params, shared_storage); op(); } CUTLASS_DEVICE GemmWithEpilogueVisitorStreamk( Params const &params, SharedStorage &shared_storage) : params(params), shared_storage(shared_storage), thread_idx(threadIdx.x), warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code 
lane_idx(threadIdx.x % 32), epilogue( params.output_op, shared_storage.epilogue, thread_idx, warp_idx, lane_idx) {} /// Executes one GEMM CUTLASS_DEVICE void operator()() { // Generic SK code path gemm(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
// Source file: cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h
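// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not part of the kernel above): how gemm()
// classifies a linear block index into stream-K, data-parallel,
// separate-reduction, or grid-padding work using the four boundaries it
// computes. The names in namespace example_streamk are invented for this
// sketch only; the boundary formulas in the comments mirror those in gemm().

namespace example_streamk {

enum class BlockRole { StreamK, DataParallel, Reduction, GridPadding };

// Boundary layout assumed by this sketch:
//   [0, sk_padding_start)              -> stream-K blocks
//   [dp_start, reduce_start)           -> data-parallel blocks
//   [reduce_start, grid_padding_start) -> separate-reduction blocks
//     (the kernel additionally requires
//      ThreadblockSwizzle::kReductionStrategy == kMixed for this range)
//   anything else                      -> padding blocks that exit immediately
inline BlockRole classify_block(
    int block_idx,
    int sk_padding_start_block_idx,    // sk_regions() * sk_blocks_per_region()
    int dp_start_block_idx,            // sk_waves * avail_sms
    int reduce_start_block_idx,        // dp_start_block_idx + dp_blocks
    int grid_padding_start_block_idx)  // reduce_start_block_idx + reduction_blocks
{
  if (block_idx >= dp_start_block_idx && block_idx < reduce_start_block_idx) {
    return BlockRole::DataParallel;
  }
  if (block_idx < sk_padding_start_block_idx) {
    return BlockRole::StreamK;
  }
  if (block_idx >= reduce_start_block_idx && block_idx < grid_padding_start_block_idx) {
    return BlockRole::Reduction;
  }
  return BlockRole::GridPadding;
}

} // namespace example_streamk
// ---------------------------------------------------------------------------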
{ "file_path": "cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h", "repo_id": "cutlass", "token_count": 11099 }
32
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/kernel_hardware_info.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/mma_sm90.h" #include "cutlass/epilogue/collective/detail.hpp" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" #include "cutlass/trace.h" #include "cute/tensor.hpp" /////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::kernel { namespace detail { // IF_SWAP_AB<T>::value will be true only if: // class T has member SwapAB and T::SwapAB is true template <typename T, typename = void> struct IF_SWAP_AB { static constexpr bool value = false; }; template <typename T> struct IF_SWAP_AB <T, void_t<decltype(T::SwapAB)>> { static constexpr bool value = T::SwapAB; }; } // namespace /////////////////////////////////////////////////////////////////////////////// template < class ProblemShape_, class CollectiveMainloop_, class CollectiveEpilogue_, class TileScheduler_ > class GemmUniversal< ProblemShape_, CollectiveMainloop_, CollectiveEpilogue_, TileScheduler_, cute::enable_if_t<cute::is_base_of_v<KernelTma, typename CollectiveMainloop_::DispatchPolicy::Schedule>>> { public: // // Type Aliases // using ProblemShape = ProblemShape_; static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, "ProblemShape{} should be <M,N,K> or <M,N,K,L>"); // Mainloop derived types using CollectiveMainloop = CollectiveMainloop_; using TileShape = typename CollectiveMainloop::TileShape; using TiledMma = typename CollectiveMainloop::TiledMma; using ArchTag = typename CollectiveMainloop::ArchTag; using ElementA = typename 
CollectiveMainloop::ElementA; using StrideA = typename CollectiveMainloop::StrideA; using ElementB = typename CollectiveMainloop::ElementB; using StrideB = typename CollectiveMainloop::StrideB; using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; using ClusterShape = typename DispatchPolicy::ClusterShape; using MainloopArguments = typename CollectiveMainloop::Arguments; using MainloopParams = typename CollectiveMainloop::Params; static_assert(ArchTag::kMinComputeCapability >= 90); // Epilogue derived types using CollectiveEpilogue = CollectiveEpilogue_; using ElementC = typename CollectiveEpilogue::ElementC; using StrideC = typename CollectiveEpilogue::StrideC; using ElementD = typename CollectiveEpilogue::ElementD; using StrideD = typename CollectiveEpilogue::StrideD; using EpilogueArguments = typename CollectiveEpilogue::Arguments; using EpilogueParams = typename CollectiveEpilogue::Params; static_assert(cute::is_same_v<ElementAccumulator, typename CollectiveEpilogue::ElementAccumulator>, "Mainloop and epilogue do not agree on accumulator value type."); static_assert(cute::is_void_v<TileScheduler_> or cute::is_same_v<TileScheduler_, PersistentScheduler>, "TMA kernel does not support specializing the tile scheduler."); using TileSchedulerTag = TileScheduler_; using TileScheduler = typename detail::TileSchedulerSelector< TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; using TileSchedulerArguments = typename TileScheduler::Arguments; static constexpr int SharedStorageSize = static_cast<int>(cute::max( sizeof(typename CollectiveMainloop::SharedStorage), sizeof(typename CollectiveEpilogue::SharedStorage))); static constexpr uint32_t MaxThreadsPerBlock = CollectiveMainloop::ThreadCount; static constexpr uint32_t MinBlocksPerMultiprocessor = 1; // Device side arguments struct Arguments { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopArguments mainloop{}; EpilogueArguments epilogue{}; KernelHardwareInfo hw_info{}; TileSchedulerArguments scheduler{}; }; // Kernel entry point API struct Params { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopParams mainloop{}; EpilogueParams epilogue{}; }; // // Methods // // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
static Params to_underlying_arguments(Arguments const& args, void* workspace) { (void) workspace; auto problem_shape = args.problem_shape; if constexpr (detail::IF_SWAP_AB<CollectiveMainloop>::value) { // swap M/N get<0>(problem_shape) = get<1>(args.problem_shape); get<1>(problem_shape) = get<0>(args.problem_shape); } return { args.mode, problem_shape, CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) }; } CUTLASS_HOST_DEVICE static bool can_implement(Arguments const& args) { bool implementable = (args.mode == GemmUniversalMode::kGemm) or (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); return implementable; } implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); implementable &= TileScheduler::can_implement(args.scheduler); return implementable; } static size_t get_workspace_size(Arguments const& args) { return 0; } static cutlass::Status initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter* cuda_adapter = nullptr) { return Status::kSuccess; } // Computes the kernel launch grid shape based on runtime parameters static dim3 get_grid_shape(Params const& params) { auto cluster_shape = ClusterShape{}; auto tile_shape = TileShape{}; auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); return TileScheduler::get_tiled_cta_shape_mnl( problem_shape_MNKL, tile_shape, cluster_shape); } static dim3 get_block_shape() { return dim3(MaxThreadsPerBlock, 1, 1); } CUTLASS_DEVICE void operator()(Params const& params, char* smem_buf) { using namespace cute; using X = Underscore; // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); #else // Preconditions static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); int thread_idx = int(threadIdx.x); int warp_idx = canonical_warp_idx_sync(); int lane_predicate = cute::elect_one_sync(); uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); // Issue Tma Descriptor Prefetch from a single thread if ((warp_idx == 0) && lane_predicate) { CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); } // Separate out problem shape for convenience // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); auto M = get<0>(problem_shape_MNKL); auto N = get<1>(problem_shape_MNKL); auto K = get<2>(problem_shape_MNKL); auto L = get<3>(problem_shape_MNKL); // TMA requires special handling of strides to deal with coord codomain mapping // Represent the full tensors -- get these from TMA Tensor mA_mkl = params.mainloop.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l) Tensor mB_nkl = params.mainloop.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l) // Get the appropriate blocks for this thread block -- potential for thread block locality auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) auto blk_coord = make_coord(_,_,_); // (m,n,k) -- defer the slice // Make tiled views Tensor gA_mkl = local_tile(mA_mkl, blk_shape, blk_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) Tensor gB_nkl = local_tile(mB_nkl, blk_shape, blk_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) // Compute m_coord, n_coord, and l_coord with their post-tiled shapes auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl)); auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl)); auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl)); auto output_tile_coord = make_coord(m_coord, n_coord, _, l_coord); // Slice with m_coord and n_coord Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) // Allocate the tiled_mma and the accumulators for the (M,N) blk_shape TiledMma tiled_mma; Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); auto k_tile_count = size<2>(gA); // Perform the collective scoped MMA CollectiveMainloop collective_mma; collective_mma( gA, params.mainloop.tma_load_a, gB, params.mainloop.tma_load_b, accumulators, k_tile_iter, k_tile_count, thread_idx, block_rank_in_cluster, smem_buf, params.mainloop ); constexpr int BLK_M_RANK = cute::rank<0>(blk_shape); bool m_oob = int(blockIdx.x) >= size<2>(gA_mkl); auto m_max_coord = unwrap(cute::transform(make_seq<BLK_M_RANK>{}, [&](auto i) { return m_oob ? 0 : get<i>(M) - get<0,i>(blk_shape) * get<i>(m_coord); })); constexpr int BLK_N_RANK = cute::rank<1>(blk_shape); bool n_oob = int(blockIdx.y) >= size<2>(gB_nkl); auto n_max_coord = unwrap(cute::transform(make_seq<BLK_N_RANK>{}, [&](auto i) { return n_oob ? 0 : get<i>(N) - get<1,i>(blk_shape) * get<i>(n_coord); })); auto residue_mnk = make_tuple(m_max_coord, n_max_coord, Int<0>{}); // Epilogue and write to gD CollectiveEpilogue epilogue{params.epilogue}; epilogue( problem_shape_MNKL, blk_shape, output_tile_coord, accumulators, tiled_mma, residue_mnk, thread_idx, smem_buf ); #endif } }; /////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::kernel
// Source file: cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma.hpp
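// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, standalone): the member-detection idiom
// behind detail::IF_SWAP_AB in the kernel above. A mainloop that wants its A/B
// operands swapped only needs to expose a static boolean member named SwapAB;
// any other type falls back to the primary template. The HasSwapAB trait and
// the two Example* mainloop types are invented for this sketch, and it uses
// std::void_t rather than the CUTLASS-internal void_t.

#include <type_traits>

namespace example_swap_ab {

template <typename T, typename = void>
struct HasSwapAB : std::false_type {};

template <typename T>
struct HasSwapAB<T, std::void_t<decltype(T::SwapAB)>>
    : std::bool_constant<T::SwapAB> {};

struct ExampleSwappingMainloop { static constexpr bool SwapAB = true; };
struct ExamplePlainMainloop {};

static_assert(HasSwapAB<ExampleSwappingMainloop>::value,
              "SwapAB present and true -> to_underlying_arguments exchanges M and N");
static_assert(!HasSwapAB<ExamplePlainMainloop>::value,
              "no SwapAB member -> primary template, problem shape left as-is");

} // namespace example_swap_ab
// ---------------------------------------------------------------------------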
{ "file_path": "cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma.hpp", "repo_id": "cutlass", "token_count": 4951 }
33
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /*! \file \brief Parameters structures for persistent tile schedulers */ #include "cutlass/coord.h" #include "cutlass/kernel_hardware_info.h" #include "cutlass/workspace.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" #include "cutlass/gemm_coord.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { namespace detail { //////////////////////////////////////////////////////////////////////////////// // // Parameters for SM90 tile schedulers // // Parameters for SM90 persistent tile scheduler struct PersistentTileSchedulerSm90Params { enum class RasterOrder { AlongM, AlongN }; enum class RasterOrderOptions { Heuristic, AlongM, AlongN }; FastDivmodU64Pow2 divmod_cluster_shape_major_{}; FastDivmodU64Pow2 divmod_cluster_shape_minor_{}; FastDivmodU64 divmod_batch_{}; FastDivmodU64 divmod_cluster_blk_major_{}; uint64_t blocks_per_problem_ = 0; int32_t log_swizzle_size_ = 0; RasterOrder raster_order_ = RasterOrder::AlongN; uint32_t problem_tiles_m_ = 0; uint32_t problem_tiles_n_ = 0; uint32_t problem_tiles_l_ = 0; uint32_t cluster_shape_m_ = 0; uint32_t cluster_shape_n_ = 0; // Initializes members. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. 
void initialize( BatchedGemmCoord problem_shape, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); return initialize( problem_blocks, cluster_shape, hw_info, max_swizzle_size, raster_order_option ); } // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. void initialize( dim3 problem_blocks, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { CUTLASS_UNUSED(hw_info); // Round up to nearest multiple of swizzle_size along each mode auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); problem_tiles_m_ = problem_blocks_m / cluster_shape.m(); problem_tiles_n_ = problem_blocks_n / cluster_shape.n(); problem_tiles_l_ = problem_blocks.z; cluster_shape_m_ = cluster_shape.m(); cluster_shape_n_ = cluster_shape.n(); RasterOrder raster_order = get_rasterization_order( problem_blocks_m, problem_blocks_n, raster_order_option ); // // Set members // blocks_per_problem_ = problem_blocks_m * problem_blocks_n * problem_blocks.z; log_swizzle_size_ = log_swizzle_size; raster_order_ = raster_order; divmod_batch_ = FastDivmodU64(problem_blocks_m * problem_blocks_n); if (raster_order == RasterOrder::AlongN) { divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.n()); divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.m()); divmod_cluster_blk_major_ = FastDivmodU64(problem_blocks_n / cluster_shape.n()); } else { divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.m()); divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.n()); divmod_cluster_blk_major_ = FastDivmodU64(problem_blocks_m / cluster_shape.m()); } } // Given the inputs, computes the physical grid we should launch. // This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. CUTLASS_HOST_DEVICE static dim3 get_grid_shape( BatchedGemmCoord problem_shape, GemmCoord cta_shape, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option, bool truncate_by_problem_size=true) { dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape, cta_shape, cluster_shape); return get_grid_shape( problem_blocks, cluster_shape, hw_info, max_swizzle_size, raster_order_option, truncate_by_problem_size ); } // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. 
CUTLASS_HOST_DEVICE static dim3 get_grid_shape( dim3 problem_blocks, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option, bool truncate_by_problem_size=true) { int const sm_count = hw_info.sm_count; // Round up to nearest multiple of swizzle_size along each mode auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); int problem_blocks_total = problem_blocks_m * problem_blocks_n * problem_blocks.z; RasterOrder raster_order = get_rasterization_order( problem_blocks_m, problem_blocks_n, raster_order_option ); dim3 launch_grid; if (raster_order == RasterOrder::AlongN) { launch_grid = dim3(cluster_shape.m(), 1, 1); } else { launch_grid = dim3(1, cluster_shape.n(), 1); } auto possibly_truncate = [&](int x, int y) { if (truncate_by_problem_size) { return platform::min(x, y); } else { return x; } }; // The else path is generic, however, we can avoid some divs if we know cluster size is 1 auto cluster_size = cluster_shape.m() * cluster_shape.n(); if (cluster_size == 1) { if (raster_order == RasterOrder::AlongN) { launch_grid.y = possibly_truncate(sm_count, problem_blocks_total); } else { launch_grid.x = possibly_truncate(sm_count, problem_blocks_total); } } else { /* * Optimal grid size calculation is based on * GH100: 8 GPCs, 72 TPCs (9 TPCs/GPC), 2 SMs/TPC, 144 SMs per full GPU * Hence, maximum SMs per GPC = 18 */ constexpr int max_sm_per_gpc = 18; // Provided SM count could possibly be less than the assumed maximum SMs per GPC auto cluster_size = cluster_shape.m() * cluster_shape.n(); int const min_num_gpc = sm_count < max_sm_per_gpc ? 1 : sm_count / max_sm_per_gpc; int const max_cta_occupancy_per_gpc = max_sm_per_gpc - (max_sm_per_gpc % cluster_size); int cta_per_device = min_num_gpc * max_cta_occupancy_per_gpc; // The calculation below allows for larger grid size launch for different GPUs. int const num_gpc_residual = sm_count < max_sm_per_gpc ? 0 : sm_count % max_sm_per_gpc; int const max_cta_occupancy_per_residual_gpc = num_gpc_residual - (num_gpc_residual % cluster_size); cta_per_device += max_cta_occupancy_per_residual_gpc; cta_per_device = sm_count < cta_per_device ? 
sm_count : cta_per_device; if (raster_order == RasterOrder::AlongN) { launch_grid.y = possibly_truncate( cta_per_device / cluster_shape.m(), problem_blocks_total / cluster_shape.m()); } else { launch_grid.x = possibly_truncate( cta_per_device / cluster_shape.n(), problem_blocks_total / cluster_shape.n()); } } return launch_grid; } CUTLASS_HOST_DEVICE static int32_t get_log_swizzle_size(int problem_ctas_m, int problem_ctas_n, int max_swizzle_size) { int min_cta_dim = platform::min(problem_ctas_m, problem_ctas_n); if (max_swizzle_size >= 8 && min_cta_dim >= 6) { return 3; } else if (max_swizzle_size >= 4 && min_cta_dim >= 3) { return 2; } else if (max_swizzle_size >= 2 && min_cta_dim >= 2) { return 1; } else { return 0; } } CUTLASS_HOST_DEVICE static RasterOrder get_rasterization_order( uint32_t tiles_m, uint32_t tiles_n, RasterOrderOptions raster_order_option ) { if (raster_order_option == RasterOrderOptions::Heuristic) { if (tiles_n > tiles_m) { return RasterOrder::AlongM; } else { return RasterOrder::AlongN; } } else { switch (raster_order_option) { case RasterOrderOptions::AlongN: return RasterOrder::AlongN; break; default: return RasterOrder::AlongM; } } } // Get the number of CTA tiles in this problem. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(BatchedGemmCoord problem_shape, GemmCoord cta_shape, GemmCoord cluster_shape) { auto cta_m = (problem_shape.m() + cta_shape.m() - 1) / cta_shape.m(); auto cta_n = (problem_shape.n() + cta_shape.n() - 1) / cta_shape.n(); return get_tiled_cta_shape_mnl(problem_shape, cluster_shape, cta_m, cta_n); } // Version of get_tiled_cta_shape_mnl that takes in as input the number of CTAs in the M and N dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(BatchedGemmCoord problem_shape, GemmCoord cluster_shape, uint32_t cta_m, uint32_t cta_n) { // Round up to nearest multiple of cluster dim along each mode auto problem_blocks_m = ((cta_m + cluster_shape.m() - 1) / cluster_shape.m()) * cluster_shape.m(); auto problem_blocks_n = ((cta_n + cluster_shape.n() - 1) / cluster_shape.n()) * cluster_shape.n(); return { static_cast<uint32_t>(problem_blocks_m), static_cast<uint32_t>(problem_blocks_n), static_cast<uint32_t>(problem_shape.batch()) }; } }; //////////////////////////////////////////////////////////////////////////////// // Parameters for SM90 persistent stream-K scheduler struct PersistentTileSchedulerSm90StreamKParams { // Strategies for computing reductions between CTAs computing portions of a given output tile enum class ReductionMode { // Participating CTAs perform reduction in a turnstile fashion in order of the K extent // covered by each CTA. This requires a lock to be held exclusively be the CTA that is // currently accumulating. // // Turnstile accumulation ensures deterministic numeric behavior when using this mode. Deterministic, // Participating CTAs perform reduction atomically to the same workspace (mostly) without locking. // Locks are used only to wait for the first CTA to write its partial values (to initialize the // workspace), and for all but the final CTA to have accumulated (so that the final CTA can load // the accumulated value and accumulate it into registers on top of which the epilogue will // be performed). 
// // Due to the nondeterminsitic ordering of accumulation, deterministic numeric behavior cannot // be guaranteed with this mode (e.g., floating-point rounding error will depend on the order // of accumulation) Nondeterministic }; // Strategies for decomposing the problem enum class DecompositionMode { // Use a heuristic to determine whether data-parallel, split-K, or stream-K decomposition should be performed Heuristic, // Force a data-parallel decomposition DataParallel, // Force a split-K decomposition. This should be paired with setting the `splits` parameter SplitK, // Force a stream-K decomposition StreamK }; using UnderlyingParams = PersistentTileSchedulerSm90Params; using RasterOrder = UnderlyingParams::RasterOrder; using RasterOrderOptions = UnderlyingParams::RasterOrderOptions; // Cluster dimensions are typically always a power of 2, so use // the power-of-two variants of FastDivmod for these. FastDivmodU64Pow2 divmod_cluster_shape_major_{}; FastDivmodU64Pow2 divmod_cluster_shape_minor_{}; FastDivmodU64 divmod_batch_{}; FastDivmodU64 divmod_cluster_blk_major_{}; // Total number of cluster-sized output tiles (i.e., not including any // splitting factors). This is primarily used for split-K decompositions, // and may be overridden in other decompositions. FastDivmodU64 divmod_clusters_mnl_{}; // We divide up the number of stream-K tiles amongst G groups of stream-K units. // The stream-K units within a group collaborate to comptue over the `sk_tiles / G` // tiles assigned to that group. Non-unit group sizes can help to preserve L2 locality of // partial chunks computed by stream-K units -- units 0 in each group will compute identical K extents // of tiles that would be assigned in the same wave according to the rasterization order of the // data-parallel formulation of the problem. FastDivmodU64 divmod_sk_groups_{}; // Number of stream-K units in each group FastDivmodU64 divmod_sk_units_per_group_{}; uint64_t units_per_problem_ = 0; FastDivmod divmod_tiles_per_output_tile_{}; int32_t log_swizzle_size_ = 0; RasterOrder raster_order_ = RasterOrder::AlongN; // The splitting factor to be used in a split-K decomposition of the problem. // If this is set to a value greater than 1, stream-K decomposition logic // is bypassed in favor of a split-K decomposition. uint32_t splits_ = 1; // Number of stream-K or split-K work units that compute an extra k iteration. // This is done to handle residuals in dividing up the k iteration space. // For stream-K, since the actual assignment of work to stream-K units will be done // at the granularity of a cluster, we store only the number of big clusters. uint32_t big_units_ = 0; // The number of groups of stream-K units that will process an extra stream-K tile cluster. uint32_t big_groups_ = 0; // Workspace for holding partial accumulators to be reduced across stream-K/split-K units void* reduction_workspace_ = nullptr; // Number of tiles covered by stream-K work units uint32_t sk_tiles_ = 0; // Number of work units computing stream-K tiles uint32_t sk_units_ = 0; // Number of tiled k iterations computed by each stream-K work unit. This // can potentially cover more than one output tile. 
uint32_t k_tiles_per_sk_unit_ = 0; // Strategy to use when reducing between collaborating CTAs ReductionMode reduction_mode_ = ReductionMode::Deterministic; // The number of sub blocks in the kernel epilogue FastDivmodU64 divmod_epilogue_subtile_{}; // The number of blocks that launched for doing separate reduction uint32_t separate_reduction_units_ = 0; // Minimum number of k tiles that can be assigned to a stream-K unit static constexpr uint32_t min_iters_per_sk_unit_ = 8u; // Maximum number of groups of stream-K units static constexpr uint32_t max_sk_groups_ = 8u; // Divides dividend by the cluster size CUTLASS_HOST_DEVICE uint64_t div_cluster_size(uint64_t dividend) const { // Use each underlying fast divmod rather than performing integer division // by the multiplication of major.divisor * minor.divisor return divmod_cluster_shape_minor_.divide( divmod_cluster_shape_major_.divide(dividend) ); } CUTLASS_HOST_DEVICE uint64_t get_cluster_size() const { return divmod_cluster_shape_minor_.divisor * divmod_cluster_shape_major_.divisor; } // Returns whether the kernel uses separate reduction CUTLASS_HOST_DEVICE bool requires_separate_reduction() const { return separate_reduction_units_ > 0; } // Returns the maximum number of peers that can collaborate on a given output tile CUTLASS_HOST_DEVICE static uint32_t max_peers_per_tile(uint64_t sk_units, uint64_t sk_tiles) { // When we can divide up our SK units to SK tiles evenly, the number of peers // per SK tile is exactly (sk_units_ / sk_tiles_). In cases where this division // is not exact, some tiles will need to be covered by additional SK units. Because // the extra work can occur at both the beginning and the end of the SK tile, at // most 2 extra peers will be needed. return static_cast<uint32_t>(sk_units / sk_tiles + 2); } // Initializes members. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. void initialize( BatchedGemmCoord problem_shape, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, ReductionMode reduction_mode, DecompositionMode decomposition_mode, void* workspace, const uint32_t epilogue_subtile = 1 ) { dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl( problem_shape, tile_shape, cluster_shape); // Number of k tiles in each output tile uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); initialize( problem_blocks, k_tiles_per_output_tile, cluster_shape, hw_info, splits, max_swizzle, raster_order_option, reduction_mode, decomposition_mode, workspace, epilogue_subtile ); } // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. 
void initialize( dim3 problem_blocks, uint32_t k_tiles_per_output_tile, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, ReductionMode reduction_mode, DecompositionMode decomposition_mode, void* workspace, const uint32_t epilogue_subtile = 1 ) { UnderlyingParams underlying_params; underlying_params.initialize( problem_blocks, cluster_shape, hw_info, max_swizzle, raster_order_option ); auto problem_blocks_l = problem_blocks.z; auto problem_blocks_m = round_up(problem_blocks.x, (1 << underlying_params.log_swizzle_size_) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << underlying_params.log_swizzle_size_) * cluster_shape.n()); uint64_t output_tiles = problem_blocks_m * problem_blocks_n * problem_blocks_l; // Reduction workspace is at the beginning of the workspace. Lock workspace follows. void* reduction_workspace = workspace; if (decomposition_mode == DecompositionMode::SplitK || (decomposition_mode == DecompositionMode::Heuristic && splits > 1)) { // Short circuit to basic split-K decomposition // Don't split by more than the available number of SMs if (splits > hw_info.sm_count) { splits = hw_info.sm_count; } // Don't split by more than the K tile iterations // // splits is almost certainly nonnegative here (e.g., hw_info.sm_count, // despite being an int, is a count), so it can safely be converted to unsigned // in the comparison to avoid a signed-unsigned comparison warning-as-error. if (static_cast<decltype(k_tiles_per_output_tile)>(splits) > k_tiles_per_output_tile) { splits = k_tiles_per_output_tile; } set_params_basic( underlying_params, problem_blocks_m, problem_blocks_n, problem_blocks_l, splits, k_tiles_per_output_tile, reduction_workspace, reduction_mode ); return; } // Calculate the maximum number of blocks from clusters of shape cluster_shape that we // can fit within sm_count SMs. dim3 grid = get_grid_shape( problem_blocks, cluster_shape, hw_info, max_swizzle, raster_order_option ); uint64_t ctas_per_wave = grid.x * grid.y; auto cluster_size = cluster_shape.m() * cluster_shape.n(); // The number of output tiles to be computed in stream-K and data-parallel fashion, respectively. uint32_t sk_tiles = get_num_sk_tiles( output_tiles, ctas_per_wave, cluster_size, k_tiles_per_output_tile, decomposition_mode ); uint64_t dp_tiles = output_tiles - sk_tiles; // Calculate the number of work units covering the data-parallel and stream-K tiles. // A "work unit" is a single index in the linearized ID space used by the scheduler. // We distinguish it from a "block," which is typically tied to a hardware unit // (e.g., the callers into this scheduler will be persistent thread blocks). // A work unit can encompass multiple output tiles worth of work (as will be the // case for stream-K blocks). // Since splitting is not required for data-parallel tiles, only one data-parallel unit // is needed per data-parallel tile. 
uint64_t dp_units = dp_tiles; uint64_t ctas_per_sk_wave = ctas_per_wave; uint64_t sk_units = get_num_sk_units(cluster_shape, ctas_per_sk_wave, sk_tiles, k_tiles_per_output_tile); if (decomposition_mode == DecompositionMode::DataParallel || (decomposition_mode == DecompositionMode::Heuristic && sk_tiles == 0) || sk_units == 0) { // Short circuit to basic data-parallel decomposition set_params_basic( underlying_params, problem_blocks_m, problem_blocks_n, problem_blocks_l, /* splits = */ 1, k_tiles_per_output_tile, reduction_workspace, reduction_mode ); return; } bool do_separate_reduction = should_perform_separate_reduction( epilogue_subtile, sk_units, sk_tiles, dp_tiles, ctas_per_wave); // Determine the number of stream-K groups that will be used. We currently use // max_sk_groups_ unless this extends beyond the extent of the dimension over // which the problem is rasterized. For example, if the tiled problem shape // (in CTA_M x CTA_N representation) when using 1x1 clusters is 4x16, // and we rasterize along the M dimension, we choose 4 groups, rather than 8. // If the cluster shape is 2x1, we choose 2 groups (CTA_M / CLUSTER_M). uint32_t max_groups_problem; if (underlying_params.raster_order_ == RasterOrder::AlongM) { max_groups_problem = problem_blocks_m / cluster_shape.m(); } else { max_groups_problem = problem_blocks_n / cluster_shape.n(); } // Select the number of groups that will be use. We start with the maximum // number of potential groups, and iterate down looking for a group size that // evenly divides the stream-K units and tiles, and for which the resulting // number of K tiles per stream-K unit remains above min_iters_per_sk_unit_ uint32_t groups = platform::min(max_groups_problem, uint32_t(max_sk_groups_)); // Grouping is disabled when separate reduction is used if (do_separate_reduction) { groups = 1; } uint32_t fallback_groups = 0; auto sk_cluster_tiles = sk_tiles / cluster_size; auto sk_cluster_units = sk_units / cluster_size; auto sk_splits_too_small = [&](uint32_t g) { // Check whether the number of K tiles computed per stream-K unit is less // than min_iters_per_sk_unit_ auto total_sk_k_tiles = (sk_tiles / g) * k_tiles_per_output_tile; auto k_tiles_per_sk_unit = total_sk_k_tiles / (sk_units / g); return k_tiles_per_sk_unit < min_iters_per_sk_unit_; }; auto is_ideal_grouping = [&](uint32_t g) { // An ideal grouping will evenly divide stream-K clusters, evenly divide // stream-K tiles, and not result in stream-K splits that are too small. return (sk_cluster_units % g == 0) && (sk_cluster_tiles % g == 0) && !sk_splits_too_small(g); }; auto is_valid_grouping = [&](uint32_t g) { // A grouping is valid, but not ideal, if it evenly divides the // stream-K clusters and does not result in stream-K splits that are // too small. Such a setting can be used as a fallback option in the // case that an ideal grouping is not achievable return sk_cluster_units % g == 0 && !sk_splits_too_small(g); }; while (groups > 1 && !is_ideal_grouping(groups)) { if (fallback_groups == 0 && is_valid_grouping(groups)) { // Set fallback groups once in preference for a larger number of groups. fallback_groups = groups; } --groups; } // If groups == 1, we did not find a group count that satisfies all criteria. If we have // found a fallback group count, use this instead. if (groups == 1 && fallback_groups > 0) { groups = fallback_groups; } auto sk_units_per_group = sk_units / groups; // sk_tiles is guaranteed to be divisible by cluster_size because it is calculated as: // sk_tiles = (waves <= 2) ? 
total_tiles : (sm_count + (total_tiles % sm_count)) // Both total_tiles and sm_count are multiples of cluster size due to padding added // prior to kernel launch. uint64_t sk_clustered_tiles = sk_tiles / cluster_size; uint64_t sk_clustered_tiles_per_group = sk_clustered_tiles / groups; uint64_t sk_tiles_per_group = sk_clustered_tiles_per_group * cluster_size; // Groups that will process an extra stream-K tile cluster. These differ from "big_units," which // are stream-K units within a group that process an extra K chunk. uint64_t sk_big_groups = sk_clustered_tiles % groups; uint64_t k_tiles_per_group = k_tiles_per_output_tile * sk_tiles_per_group; // Number of k tiles computed per stream-K unit uint64_t k_tiles_per_sk_unit = k_tiles_per_group / sk_units_per_group; uint32_t reduction_units = 0; // Use separate reduction when we have less than one wave of output tiles (dp_tiles == 0) // and when each tile will be operated on by at least two stream-K units (sk_units > 2 * sk_tiles) if (do_separate_reduction) { // Each reduction unit will reduce the partials of an epilogue subtile for // a given output tile and compute the epilogue. Thus, there are as many reduction // units as there are epilogue subtiles. reduction_units = sk_tiles * epilogue_subtile; } else if (decomposition_mode == DecompositionMode::Heuristic && sk_tiles < sk_units && sk_units % sk_tiles == 0) { // If the number of stream-K units is a multiple of the number of stream-K tiles, then // the problem can leverage a basic split-K decomposition for the stream-K tiles. // This case happens when separate reduction is disabled. uint32_t sk_splits = static_cast<uint32_t>(sk_units / sk_tiles); set_params_basic( underlying_params, problem_blocks_m, problem_blocks_n, problem_blocks_l, sk_splits, k_tiles_per_output_tile, reduction_workspace, reduction_mode ); return; } divmod_cluster_shape_major_ = underlying_params.divmod_cluster_shape_major_; divmod_cluster_shape_minor_ = underlying_params.divmod_cluster_shape_minor_; divmod_batch_ = underlying_params.divmod_batch_; divmod_tiles_per_output_tile_ = FastDivmod(k_tiles_per_output_tile); divmod_cluster_blk_major_ = underlying_params.divmod_cluster_blk_major_; divmod_sk_groups_ = FastDivmodU64(static_cast<uint64_t>(groups)); divmod_sk_units_per_group_ = FastDivmodU64(static_cast<uint64_t>(sk_units / groups)); // Override divmod_clusters_mnl_ to be the number of cluster-sized stream-K units. // This setting ensures that the use of this divmod for stream-K decompositions // is essentially a no-op. divmod_clusters_mnl_ = FastDivmodU64(sk_units / cluster_size); splits_ = 1; log_swizzle_size_ = underlying_params.log_swizzle_size_; units_per_problem_ = static_cast<uint32_t>(dp_units + sk_units); raster_order_ = underlying_params.raster_order_; // Assign big_units_ assuming that group count == 1. This is unused by stream-K // when group count > 1. big_units_ = static_cast<uint32_t>(k_tiles_per_group % k_tiles_per_sk_unit); big_groups_ = static_cast<uint32_t>(sk_big_groups); reduction_workspace_ = reduction_workspace; sk_tiles_ = sk_tiles; sk_units_ = static_cast<uint32_t>(sk_units); k_tiles_per_sk_unit_ = static_cast<uint32_t>(k_tiles_per_sk_unit); reduction_mode_ = reduction_mode; divmod_epilogue_subtile_ = FastDivmodU64(epilogue_subtile); separate_reduction_units_ = reduction_units; } // Given the inputs, computes the physical grid we should launch. // This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1.
CUTLASS_HOST_DEVICE static dim3 get_grid_shape( BatchedGemmCoord problem_shape, GemmCoord cta_shape, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, cta_shape, cluster_shape); return get_grid_shape( problem_blocks, cluster_shape, hw_info, max_swizzle_size, raster_order_option ); } // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. CUTLASS_HOST_DEVICE static dim3 get_grid_shape( dim3 problem_blocks, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { // Call into the underlying get_grid_shape method, but do not allow the grid shape returned // to be truncated based on the number of output tiles in the problem. return UnderlyingParams::get_grid_shape( problem_blocks, cluster_shape, hw_info, max_swizzle_size, raster_order_option, /* truncate_by_problem_size = */false ); } // Returns the number of stream-K tiles that will be computed amongst `output_tiles` total // output tiles on a device with `ctas_per_wave` CTAs in each wave. static uint32_t get_num_sk_tiles( uint64_t output_tiles, uint64_t ctas_per_wave, uint64_t cluster_size, uint32_t k_tiles_per_output_tile, DecompositionMode decomposition_mode ) { uint32_t full_waves = static_cast<uint32_t>(output_tiles / ctas_per_wave); uint32_t total_waves = static_cast<uint32_t>((output_tiles + ctas_per_wave - 1) / ctas_per_wave); if (decomposition_mode == DecompositionMode::DataParallel || decomposition_mode == DecompositionMode::SplitK) { return 0; } // If there is wave quantization, assign the first two waves worth of tiles to be // covered by stream-K work and the remainder to be data-parallel. Since we know // that full_waves == total_waves - 1 in this case, the number of data-parallel // waves is simply full_waves-1 (unless full_waves == 0). uint32_t dp_waves = full_waves > 1 ? full_waves - 1 : 0; uint64_t dp_tiles = dp_waves * ctas_per_wave; uint64_t sk_tiles = output_tiles - dp_tiles; if (decomposition_mode == DecompositionMode::Heuristic) { if (full_waves == total_waves || k_tiles_per_output_tile <= min_iters_per_sk_unit_) { // All tiles will be data-parallel tiles if there is either no quantization // or if there is no work to be split. return 0; } // // The final wave is not full. Perform some stream-K work. // // Rudimentary heuristic: prefer data-parallel decomposition if we have more than // one wave and the tail wave is more than half full. This is subject to change. uint64_t tail_tiles = output_tiles - (full_waves * ctas_per_wave); if (2 * tail_tiles >= ctas_per_wave) { return 0; } } return static_cast<uint32_t>(sk_tiles); } CUTLASS_HOST_DEVICE static uint64_t get_num_sk_units(GemmCoord cluster_shape, uint64_t ctas_per_sk_wave, uint32_t sk_tiles, uint32_t k_tiles_per_output_tile) { // If there are stream-K tiles to compute and a sufficiently large number of k iterations // across them, they will be covered by a single wave of persistent threadblocks. Thus, there // will be as many work units as there are threadblocks in a single wave. 
// // When the total k iterations across stream-K tiles is too small to justify distributing // across an entire wave of blocks, we instead distribute the iterations over a smaller // set of blocks. // Calculate the number of stream-K units that would be needed if each stream-K unit // computed the minimum allowable k iterations. Truncate this to be in units of clusters. // Number of k iterations computed by the stream-K units as a whole uint64_t k_tiles_sk_total = k_tiles_per_output_tile * sk_tiles; // Calculate the number of stream-K units that would be needed if each stream-K unit // computed the minimum allowable k iterations. Truncate this to be in units of clusters. auto cluster_size = cluster_shape.m() * cluster_shape.n(); uint64_t min_sized_sk_units = (k_tiles_sk_total / min_iters_per_sk_unit_); min_sized_sk_units = (min_sized_sk_units / cluster_size) * cluster_size; uint64_t sk_units = platform::min(ctas_per_sk_wave, min_sized_sk_units); return sk_units; } // Calculates the size of the workspace needed for holding reduction barriers CUTLASS_HOST_DEVICE static int get_barrier_workspace_size(uint64_t num_tiles, uint32_t mma_warp_groups, uint32_t barrier_bits) { auto workspace_bits = num_tiles * mma_warp_groups * barrier_bits; return round_up_to_l2_alignment(bits_to_bytes(static_cast<int>(workspace_bits))); } // Calculates the size of the workspace needed for holding partial outputs from splits CUTLASS_HOST_DEVICE static int get_reduction_workspace_size(uint64_t num_tiles, GemmCoord tile_shape, uint32_t accumulator_bits, uint32_t num_accumulator_mtxs = 1) { auto output_tile_size = tile_shape.m() * tile_shape.n(); auto workspace_bits = accumulator_bits * output_tile_size * num_tiles * num_accumulator_mtxs; return round_up_to_l2_alignment(bits_to_bytes(static_cast<int>(workspace_bits))); } #if !defined(__CUDACC_RTC__) static void get_workspace_component_sizes( dim3 problem_blocks, uint32_t k_tiles_per_output_tile, GemmCoord tile_shape, GemmCoord cluster_shape, int& barrier_workspace_size, int& reduction_workspace_size, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t accumulator_bits, uint32_t epilogue_subtile = 1, uint32_t num_accumulator_mtxs = 1) { auto log_swizzle_size = UnderlyingParams::get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle); problem_blocks.x = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); problem_blocks.y = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); // Workspace is needed only for output tiles that will be split. Thus, we first determine the number // of output tiles that will be split, and then calculate the workspace needed to cover these. 
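// Illustrative sizing sketch (hypothetical values, not a claim about any real kernel):
// for 8 output tiles that require workspace, a 128x128 output tile, 32-bit
// accumulators, 2 MMA warp groups, and 32-bit barriers, the helpers above give
//   reduction workspace: 32 * (128 * 128) * 8 bits = 524,288 bytes (already a
//                        multiple of the 128-byte L2 alignment), and
//   barrier workspace:   8 * 2 * 32 bits = 64 bytes, rounded up to 128 bytes.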
uint64_t output_tiles = problem_blocks.x * problem_blocks.y * problem_blocks.z; if (decomposition_mode == DecompositionMode::DataParallel) { barrier_workspace_size = 0; reduction_workspace_size = 0; } else if (decomposition_mode == DecompositionMode::SplitK || (decomposition_mode == DecompositionMode::Heuristic && splits > 1)) { // Basic split-K variant requires workspace for all output tiles barrier_workspace_size = get_barrier_workspace_size(output_tiles, mma_warp_groups, barrier_bits); reduction_workspace_size = get_reduction_workspace_size(output_tiles, tile_shape, accumulator_bits, num_accumulator_mtxs); } else { KernelHardwareInfo new_hw_info; new_hw_info.device_id = hw_info.device_id; new_hw_info.sm_count = hw_info.sm_count; if (new_hw_info.sm_count <= 0) { CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); new_hw_info.sm_count = KernelHardwareInfo::query_device_multiprocessor_count(new_hw_info.device_id); } dim3 grid = get_grid_shape( problem_blocks, cluster_shape, new_hw_info, max_swizzle, raster_order_option ); uint64_t ctas_per_wave = grid.x * grid.y; uint64_t cluster_size = cluster_shape.m() * cluster_shape.n(); uint32_t sk_tiles = get_num_sk_tiles( output_tiles, ctas_per_wave, cluster_size, static_cast<uint32_t>(k_tiles_per_output_tile), decomposition_mode ); uint64_t ctas_per_sk_wave = ctas_per_wave; uint64_t sk_units = get_num_sk_units(cluster_shape, ctas_per_sk_wave, sk_tiles, k_tiles_per_output_tile); uint64_t dp_tiles = output_tiles - sk_tiles; uint64_t reduction_tiles = sk_tiles; if (should_perform_separate_reduction(epilogue_subtile, sk_units, sk_tiles, dp_tiles, ctas_per_wave)) { // In separate reduction, each peer writes to its own location in scratch space. // Thus, for separate reduction, we need as many reduction tiles per output tile // as there are the maximum number of peers that can collaborate on an output tile. reduction_tiles *= max_peers_per_tile(sk_units, sk_tiles); } // Though separate reduction requires a larger reduction workspace, only one barrier // is needed per output tile. Each peer will increment the barrier by one once the peer has // written its accumulator to scratch space. The separate reduction unit will only begin // performing the reduction when the barrier has reached the number of peers for the output tile. barrier_workspace_size = get_barrier_workspace_size(sk_tiles, mma_warp_groups, barrier_bits); reduction_workspace_size = get_reduction_workspace_size(reduction_tiles, tile_shape, accumulator_bits, num_accumulator_mtxs); } } #endif // !defined(__CUDACC_RTC__) // Returns whether the kernel is configured in a manner for which separate reduction should be used CUTLASS_HOST_DEVICE static bool should_perform_separate_reduction(uint32_t epilogue_subtile, uint64_t sk_units, uint64_t sk_tiles, uint64_t dp_tiles, uint64_t ctas_per_wave) { // We perform separate reduction if we have fewer than one wave of output tiles // and each output tile is covered by at least two stream-K units. When sk_units is a // multiple of sk_tiles, we will choose the basic split-K path instead of separate reduction for now. return (epilogue_subtile != 1) && (dp_tiles == 0) && (sk_units > 2u * sk_tiles) && (sk_units + sk_tiles * epilogue_subtile <= ctas_per_wave); } // Get the amount of scratch workspace needed for the kernel. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1.
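// Hypothetical host-side usage sketch (the argument values below are illustrative
// placeholders, not defaults defined by this header; in practice the CUTLASS kernel
// and GemmUniversalAdapter drive these calls on the caller's behalf):
//
//   size_t bytes = get_workspace_size(
//       problem_shape, tile_shape, cluster_shape, hw_info,
//       /*splits=*/1, /*max_swizzle=*/1, RasterOrderOptions::Heuristic,
//       DecompositionMode::Heuristic, /*mma_warp_groups=*/2, /*barrier_bits=*/32,
//       /*element_accumulator_bits=*/32, /*epilogue_subtile=*/1,
//       /*num_accumulator_mtxs=*/1);
//   void* workspace = nullptr;
//   cudaMalloc(&workspace, bytes);
//   initialize_workspace(workspace, stream, problem_shape, tile_shape, cluster_shape,
//       hw_info, /*splits=*/1, /*max_swizzle=*/1, RasterOrderOptions::Heuristic,
//       DecompositionMode::Heuristic, /*mma_warp_groups=*/2, /*barrier_bits=*/32,
//       /*element_accumulator_bits=*/32, /*epilogue_subtile=*/1);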
static size_t get_workspace_size( BatchedGemmCoord problem_shape, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t element_accumulator_bits, uint32_t epilogue_subtile, uint32_t num_accumulator_mtxs) { dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); return get_workspace_size( problem_blocks, k_tiles_per_output_tile, tile_shape, cluster_shape, hw_info, splits, max_swizzle, raster_order_option, decomposition_mode, mma_warp_groups, barrier_bits, element_accumulator_bits, epilogue_subtile, num_accumulator_mtxs ); } // Version of get_workspace_size that takes in as input the number of CTAs in the M and N dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. static size_t get_workspace_size( dim3 problem_blocks, uint32_t k_tiles_per_output_tile, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t element_accumulator_bits, uint32_t epilogue_subtile = 1, uint32_t num_accumulator_mtxs = 1) { int barrier_workspace_size = 0; int reduction_workspace_size = 0; #if !defined(__CUDACC_RTC__) get_workspace_component_sizes( problem_blocks, k_tiles_per_output_tile, tile_shape, cluster_shape, barrier_workspace_size, reduction_workspace_size, hw_info, splits, max_swizzle, raster_order_option, decomposition_mode, mma_warp_groups, barrier_bits, element_accumulator_bits, epilogue_subtile, num_accumulator_mtxs ); #endif return barrier_workspace_size + reduction_workspace_size; } // Initialize the workspace to be used for the kernel. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. static cutlass::Status initialize_workspace( void* workspace, cudaStream_t stream, BatchedGemmCoord problem_shape, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t element_accumulator_bits, uint32_t epilogue_subtile) { dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); return initialize_workspace( workspace, stream, problem_blocks, k_tiles_per_output_tile, tile_shape, cluster_shape, hw_info, splits, max_swizzle, raster_order_option, decomposition_mode, mma_warp_groups, barrier_bits, element_accumulator_bits, epilogue_subtile ); } // Version of initialize_workspace that takes in as input the number of CTAs in the M and N dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. 
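// Workspace layout assumed throughout this struct (a summary of the offsets used in
// the body below, added for clarity):
//
//   [ reduction workspace (partial accumulators) | barrier workspace (zero-initialized) ]
//
// get_workspace_component_sizes computes the two extents; only the trailing barrier
// region is cleared here, since partial accumulators are always written before they are read.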
static cutlass::Status initialize_workspace( void* workspace, cudaStream_t stream, dim3 problem_blocks, uint32_t k_tiles_per_output_tile, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t element_accumulator_bits, uint32_t epilogue_subtile = 1, uint32_t num_accumulator_mtxs = 1) { #if !defined(__CUDACC_RTC__) int barrier_workspace_size = 0; int reduction_workspace_size = 0; get_workspace_component_sizes( problem_blocks, k_tiles_per_output_tile, tile_shape, cluster_shape, barrier_workspace_size, reduction_workspace_size, hw_info, splits, max_swizzle, raster_order_option, decomposition_mode, mma_warp_groups, barrier_bits, element_accumulator_bits, epilogue_subtile, num_accumulator_mtxs ); if (barrier_workspace_size > 0) { if (workspace == nullptr) { return Status::kErrorWorkspaceNull; } // Only the barrier workspace needs to be cleared for stream-K. // Barrier workspace follows reduction workspace. uint8_t* barrier_workspace = reinterpret_cast<uint8_t*>(workspace) + reduction_workspace_size; return zero_workspace(static_cast<void*>(barrier_workspace), barrier_workspace_size, stream); } #endif // !defined(__CUDACC_RTC__) return Status::kSuccess; } void set_params_basic( UnderlyingParams const& underlying_params, uint32_t blocks_m, uint32_t blocks_n, uint32_t blocks_l, uint32_t splits, uint32_t k_tiles_per_output_tile, void* reduction_workspace, ReductionMode reduction_mode) { divmod_cluster_shape_major_ = underlying_params.divmod_cluster_shape_major_; divmod_cluster_shape_minor_ = underlying_params.divmod_cluster_shape_minor_; divmod_batch_ = FastDivmodU64(blocks_m * blocks_n); divmod_tiles_per_output_tile_ = FastDivmod(k_tiles_per_output_tile); divmod_sk_groups_ = FastDivmodU64(1u); auto cluster_size = underlying_params.divmod_cluster_shape_major_.divisor * underlying_params.divmod_cluster_shape_minor_.divisor; divmod_clusters_mnl_ = FastDivmodU64((blocks_m * blocks_n * blocks_l) / cluster_size); splits_ = splits; divmod_cluster_blk_major_ = underlying_params.divmod_cluster_blk_major_; log_swizzle_size_ = underlying_params.log_swizzle_size_; units_per_problem_ = blocks_m * blocks_n * blocks_l; raster_order_ = underlying_params.raster_order_; big_units_ = k_tiles_per_output_tile % splits; reduction_workspace_ = reduction_workspace; reduction_mode_ = reduction_mode; k_tiles_per_sk_unit_ = k_tiles_per_output_tile / splits; // No stream-K work is performed for "basic" data-parallel and split-K decompositions sk_tiles_ = 0; sk_units_ = 0; divmod_sk_units_per_group_ = FastDivmodU64(1u); separate_reduction_units_ = 0; } private: // Round up number of bytes to the nearest multiple of L2 cache line alignment CUTLASS_HOST_DEVICE static int round_up_to_l2_alignment(int bytes) { constexpr static uint32_t L2CacheLineSizeBytes = 128; return (bytes + L2CacheLineSizeBytes - 1) / L2CacheLineSizeBytes * L2CacheLineSizeBytes; } }; //////////////////////////////////////////////////////////////////////////////// // Parameters for SM90 persistent group scheduler (only used for Grouped Gemms) template<class ProblemShape> struct PersistentTileSchedulerSm90GroupParams { enum class RasterOrder { AlongM, AlongN }; enum class RasterOrderOptions { Heuristic, AlongM, AlongN }; FastDivmodU64Pow2 divmod_cluster_shape_major_{}; FastDivmodU64Pow2 divmod_cluster_shape_minor_{}; FastDivmodU64 divmod_cta_shape_m_{}; FastDivmodU64 
divmod_cta_shape_n_{}; uint64_t blocks_across_problem_ = 0; bool pre_processed_problem_shapes = true; int32_t log_swizzle_size_ = 0; RasterOrder raster_order_ = RasterOrder::AlongN; int32_t groups_ = 0; ProblemShape* problem_shapes_ = nullptr; GemmCoord cta_shape_; GemmCoord cluster_shape_; // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. void initialize( dim3 problem_blocks, int32_t groups, ProblemShape* problem_shapes, ProblemShape const* host_problem_shapes, GemmCoord cta_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { CUTLASS_UNUSED(hw_info); // Round up to nearest multiple of swizzle_size along each mode auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); RasterOrder raster_order = get_rasterization_order( problem_blocks_m, problem_blocks_n, raster_order_option ); // // Set members // groups_ = groups; problem_shapes_ = problem_shapes; cta_shape_ = cta_shape; cluster_shape_ = cluster_shape; blocks_across_problem_ = problem_blocks.x * problem_blocks.y * problem_blocks.z; pre_processed_problem_shapes = (host_problem_shapes == nullptr) ? false : true; log_swizzle_size_ = log_swizzle_size; raster_order_ = raster_order; if (raster_order == RasterOrder::AlongN) { divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.n()); divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.m()); } else { divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.m()); divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.n()); } divmod_cta_shape_m_ = FastDivmodU64(cta_shape_.m()); divmod_cta_shape_n_ = FastDivmodU64(cta_shape_.n()); } // Version of get_tiled_cta_shape_mnl that takes in as input the number of CTAs in the M and N dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(GemmCoord cluster_shape, uint32_t cta_m, uint32_t cta_n) { // Round up to nearest multiple of cluster dim along each mode auto problem_blocks_m = ((cta_m + cluster_shape.m() - 1) / cluster_shape.m()) * cluster_shape.m(); auto problem_blocks_n = ((cta_n + cluster_shape.n() - 1) / cluster_shape.n()) * cluster_shape.n(); return { static_cast<uint32_t>(problem_blocks_m), static_cast<uint32_t>(problem_blocks_n), static_cast<uint32_t>(1) // Only a single batch per group is currently supported }; } // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest.
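// Illustrative example of the GPC-based grid sizing performed below (the SM count is
// hypothetical; the per-GPC constant is the one assumed in the code): with
// sm_count = 132 and a 2x2 cluster (cluster_size = 4),
//   min_num_gpc = 132 / 18 = 7, max_cta_occupancy_per_gpc = 18 - (18 % 4) = 16,
//   cta_per_device = 7 * 16 = 112, residual SMs = 132 % 18 = 6 -> 6 - (6 % 4) = 4,
// giving cta_per_device = 116, which is finally capped at sm_count.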
CUTLASS_HOST_DEVICE static dim3 get_grid_shape( dim3 problem_blocks, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option, bool truncate_by_problem_size=true) { int const sm_count = hw_info.sm_count; // Round up to nearest multiple of swizzle_size along each mode auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); int problem_blocks_total = problem_blocks_m * problem_blocks_n * problem_blocks.z; RasterOrder raster_order = get_rasterization_order( problem_blocks_m, problem_blocks_n, raster_order_option ); dim3 launch_grid; if (raster_order == RasterOrder::AlongN) { launch_grid = dim3(cluster_shape.m(), 1, 1); } else { launch_grid = dim3(1, cluster_shape.n(), 1); } auto possibly_truncate = [&](int x, int y) { if (truncate_by_problem_size) { return platform::min(x, y); } else { return x; } }; // The else path is generic, however, we can avoid some divs if we know cluster size is 1 auto cluster_size = cluster_shape.m() * cluster_shape.n(); if (cluster_size == 1) { if (raster_order == RasterOrder::AlongN) { launch_grid.y = possibly_truncate(sm_count, problem_blocks_total); } else { launch_grid.x = possibly_truncate(sm_count, problem_blocks_total); } } else { // Optimal grid size calculation is based on // GH100: 8 GPCs, 72 TPCs (9 TPCs/GPC), 2 SMs/TPC, 144 SMs per full GPU // Hence, maximum SMs per GPC = 18 constexpr int max_sm_per_gpc = 18; // Provided SM count could possibly be less than the assumed maximum SMs per GPC auto cluster_size = cluster_shape.m() * cluster_shape.n(); int const min_num_gpc = sm_count < max_sm_per_gpc ? 1 : sm_count / max_sm_per_gpc; int const max_cta_occupancy_per_gpc = max_sm_per_gpc - (max_sm_per_gpc % cluster_size); int cta_per_device = min_num_gpc * max_cta_occupancy_per_gpc; // The calculation below allows for larger grid size launch for different GPUs. int const num_gpc_residual = sm_count < max_sm_per_gpc ? 0 : sm_count % max_sm_per_gpc; int const max_cta_occupancy_per_residual_gpc = num_gpc_residual - (num_gpc_residual % cluster_size); cta_per_device += max_cta_occupancy_per_residual_gpc; cta_per_device = sm_count < cta_per_device ? 
sm_count : cta_per_device; if (raster_order == RasterOrder::AlongN) { launch_grid.y = possibly_truncate( cta_per_device / cluster_shape.m(), problem_blocks_total / cluster_shape.m()); } else { launch_grid.x = possibly_truncate( cta_per_device / cluster_shape.n(), problem_blocks_total / cluster_shape.n()); } } return launch_grid; } CUTLASS_HOST_DEVICE static int32_t get_log_swizzle_size(int problem_ctas_m, int problem_ctas_n, int max_swizzle_size) { int min_cta_dim = platform::min(problem_ctas_m, problem_ctas_n); if (max_swizzle_size >= 8 && min_cta_dim >= 6) { return 3; } else if (max_swizzle_size >= 4 && min_cta_dim >= 3) { return 2; } else if (max_swizzle_size >= 2 && min_cta_dim >= 2) { return 1; } else { return 0; } } CUTLASS_HOST_DEVICE static RasterOrder get_rasterization_order( uint32_t tiles_m, uint32_t tiles_n, RasterOrderOptions raster_order_option ) { if (raster_order_option == RasterOrderOptions::Heuristic) { if (tiles_n > tiles_m) { return RasterOrder::AlongM; } else { return RasterOrder::AlongN; } } else { switch (raster_order_option) { case RasterOrderOptions::AlongN: return RasterOrder::AlongN; break; default: return RasterOrder::AlongM; } } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace detail } // namespace kernel } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
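// Illustrative behavior of the helpers above (example inputs only, not guarantees):
//   get_log_swizzle_size(/*problem_ctas_m=*/12, /*problem_ctas_n=*/7, /*max_swizzle_size=*/8)
//     returns 3, i.e. swizzle tiles of 8, since min(12, 7) >= 6;
//   get_rasterization_order(/*tiles_m=*/4, /*tiles_n=*/16, RasterOrderOptions::Heuristic)
//     returns RasterOrder::AlongM, because the N extent exceeds the M extent.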
cutlass/include/cutlass/gemm/kernel/tile_scheduler_params.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/tile_scheduler_params.h", "repo_id": "cutlass", "token_count": 21776 }
34
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting TensorOp instructions. */ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" #include "cutlass/gemm/warp/default_mma_with_reduction_tensor_op.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" #include "cutlass/gemm/threadblock/default_mma_core.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" #include "cutlass/gemm/threadblock/mma_with_reduction_multistage.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Template defining default matrix multiply operators inferred from threadblock tile size, /// global memory data layout, and target math instruction.
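// A hypothetical instantiation sketch (the shapes, element types, and stage count are
// illustrative choices, not values prescribed by this header):
//
//   using Core = cutlass::gemm::threadblock::DefaultMmaWithReductionCore<
//       cutlass::gemm::GemmShape<128, 128, 32>,         // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,           // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,            // instruction shape
//       cutlass::half_t, cutlass::layout::RowMajor,     // A
//       cutlass::half_t, cutlass::layout::ColumnMajor,  // B
//       float, cutlass::layout::RowMajor,               // C
//       cutlass::arch::OpClassTensorOp,
//       /*ReduceKForA_=*/true,
//       /*Stages=*/3>;
//
// Core::MmaTensorOp then names the warp-level tensor op that also reduces the selected
// operand along K, and Core::MmaPolicy is what the threadblock-scoped mainloop consumes.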
template < /// Shape of threadblock-scoped matrix multiply operator typename Shape_, /// Shape of warp-level matrix multiply operator typename WarpShape, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape, /// Element data type of A operand typename ElementA, /// Layout of operand A typename LayoutA, /// Element data type of B operand typename ElementB, /// Layout of operand B typename LayoutB, /// Data type of accumulator typename ElementC, /// Layout of accumulator typename LayoutC, /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) typename OperatorClass, /// Reduce operand A or B along K dimension bool ReduceKForA_, /// Number of stages int Stages = 2, /// Operation performed by MMA typename Operator = typename platform::conditional< (platform::is_same<OperatorClass, cutlass::arch::OpClassTensorOp>::value) && (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value || platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value), cutlass::arch::OpMultiplyAddSaturate, cutlass::arch::OpMultiplyAdd>::type, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor = false, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA = cutlass::arch::CacheOperation::Global, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB = cutlass::arch::CacheOperation::Global, /// per-element transformation for elements of A ComplexTransform TransformA = ComplexTransform::kNone, /// per-element transformation for elements of B ComplexTransform TransformB = ComplexTransform::kNone, bool IsComplex = false// (is_complex<ElementA>::value || is_complex<ElementB>::value) > struct DefaultMmaWithReductionCore { using Base = DefaultMmaCore<Shape_, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, OperatorClass, Stages, Operator, AccumulatorsInRowMajor, CacheOpA, CacheOpB, TransformA, TransformB, IsComplex>; using Shape = Shape_; using IteratorThreadMapA = typename Base::IteratorThreadMapA; using IteratorThreadMapB = typename Base::IteratorThreadMapB; using SmemIteratorA = typename Base::SmemIteratorA; using SmemIteratorB = typename Base::SmemIteratorB; using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; using WarpCount = typename Base::WarpCount; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaWithReductionTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, ReduceKForA_, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h", "repo_id": "cutlass", "token_count": 2646 }
35
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/warp/mma_complex_tensor_op.h" #include "cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h" #include "cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Complex transform on A operand ComplexTransform TransformA = ComplexTransform::kNone, /// Complex transform on B operand ComplexTransform TransformB = ComplexTransform::kNone, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_ = arch::OpMultiplyAddComplex> struct DefaultMmaComplexTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex<T>*complex<T> case // 4 real-valued mma operations // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Real-valued underlying type of complex-valued A operand typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Real-valued underlying type of complex-valued B operand typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Real-valued underlying type of complex-valued C operand typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddComplex> { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, RealElementA, cutlass::layout::RowMajor, RealElementB, cutlass::layout::ColumnMajor, RealElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, Policy, TransformA, TransformB>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex<T>*complex<T> case using GaussianComplex operation // 3 real-valued mma operations // A = (ar + j ai), B = (br +j bi), D = AB // P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi) // D = dr + j di = (P1 - P3) + j (P1 + P2) 
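// Expanding the partial products above confirms the identity (a short algebraic check,
// not additional functionality):
//   P1 - P3 = (ar*br + ai*br) - (ai*br + ai*bi) = ar*br - ai*bi = dr
//   P1 + P2 = (ar*br + ai*br) - (ar*br - ar*bi) = ai*br + ar*bi = di
// The Gaussian formulation trades one multiplication for extra additions, which is why
// three real-valued MMAs suffice instead of four.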
///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Real-valued underlying type of complex-valued A operand typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Real-valued underlying type of complex-valued B operand typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Real-valued underlying type of complex-valued C operand typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddGaussianComplex> { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, RealElementA, cutlass::layout::RowMajor, RealElementB, cutlass::layout::ColumnMajor, RealElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaGaussianComplexTensorOp< WarpShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, Policy, TransformA, TransformB>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization - input and output types are complex<float>*complex<float> // Use TF32 tensor operation internally // 4 real-valued mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 operations on TF32 // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddComplex> { // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 mma instruction using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, tfloat32_t, cutlass::layout::RowMajor, tfloat32_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, Policy, TransformA, TransformB>; }; 
///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization - input and output types are complex<float>*complex<float> // Use BF16 tensor operation internally // 4 real-valued mma.sync.aligned.m16n8k8.f32.bf16.bf16.f32 operations on BF16 // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddFastBF16> { // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.bf16.bf16.f32 mma instruction using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, bfloat16_t, cutlass::layout::RowMajor, bfloat16_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, Policy, TransformA, TransformB>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization - input and output types are complex<float>*complex<float> // Use F16 tensor operation internally // 4 real-valued mma.sync.aligned.m16n8k8.f32.f16.f16.f32 operations on F16 // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddFastF16> { // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.f16.f16.f32 mma instruction using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, half_t, cutlass::layout::RowMajor, half_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, Policy, TransformA, TransformB>; }; 
///////////////////////////////////////////////////////////////////////////////////////////////// /// 3xTF32 or 4xTF32 (fast and accurate complex<float> operation) /// Partial specialization - input and output types are complex<float> * complex<float> // Use 3xTF32 or 4xTF32 tensor operation internally // 4 real-valued mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 operations on TF32 // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = 3x[(ar*br - ai*bi) + j (ar*bi + ai*br)] ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddComplexFastF32> { // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 mma instruction using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, tfloat32_t, cutlass::layout::RowMajor, tfloat32_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOpFastF32< WarpShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, Policy, TransformA, TransformB>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex<double>*complex<double> case // 4 real-valued mma.sync.aligned.m16n8k4.f64.f64.f64.f64 operations // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Real-valued underlying type of complex-valued A operand typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Real-valued underlying type of complex-valued B operand typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Real-valued underlying type of complex-valued C operand typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, GemmShape<16, 8, 4>, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddComplex> { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< GemmShape<16, 8, 4>, 32, RealElementA, cutlass::layout::RowMajor, RealElementB, cutlass::layout::ColumnMajor, RealElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, 
cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, Policy, TransformA, TransformB, true>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex<T>*complex<T> case using GaussianComplex operation // 3 real-valued mma.sync.aligned.m16n8k4.f64.f64.f64.f64 operations // A = (ar + j ai), B = (br +j bi), D = AB // P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi) // D = dr + j di = (P1 - P3) + j (P1 + P2) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Real-valued underlying type of complex-valued A operand typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Real-valued underlying type of complex-valued B operand typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Real-valued underlying type of complex-valued C operand typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, GemmShape<16, 8, 4>, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddGaussianComplex> { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< GemmShape<16, 8, 4>, 32, RealElementA, cutlass::layout::RowMajor, RealElementB, cutlass::layout::ColumnMajor, RealElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaGaussianComplexTensorOp< WarpShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, Policy, TransformA, TransformB, true>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass
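// Hypothetical usage sketch (the shapes and layouts are illustrative only; this trait is
// normally instantiated indirectly by the default MMA-core machinery rather than by hand):
//
//   using ComplexMma = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
//       cutlass::gemm::GemmShape<32, 32, 8>,   // warp-level tile
//       cutlass::gemm::GemmShape<16, 8, 8>,    // instruction shape (TF32 path)
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,
//       cutlass::complex<float>, cutlass::layout::RowMajor,
//       cutlass::complex<float>, cutlass::layout::RowMajor,
//       cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone,
//       cutlass::arch::OpMultiplyAddComplex>::Type;
//
// Choosing arch::OpMultiplyAddGaussianComplex instead selects the three-multiply
// Gaussian variant defined above.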
cutlass/include/cutlass/gemm/warp/default_mma_complex_tensor_op.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/default_mma_complex_tensor_op.h", "repo_id": "cutlass", "token_count": 7122 }
36
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators used by warp-level loading scale and bias vectors. Every scale/bias data only needs to be loaded once for every channel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { //////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Layout of operand typename Layout_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Policy of the details of LDSM shape and iterations typename Policy_, /// Number of threads participating in one matrix operation int Threads, /// Number of partitions along K dimension int PartitionsK_ = 1> class ScaleBiasTileIterator; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Policy of the details of LDSM shape and iterations typename Policy_, /// Number of partitions along K dimension int PartitionsK_> class ScaleBiasTileIterator<Shape_, Element_, cutlass::layout::PitchLinear, InstructionShape_, Policy_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::PitchLinear; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// Number of partitions along K dimension static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection using Policy = Policy_; private: /// Pointer type used for accesses using AccessType = Array<Element, kElementsPerAccess>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, 2 * Policy::kLdsmOpInner * InstructionShape::kContiguous / kThreads>; private: /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; /// Internal counter used to determine when to increment byte offset and when /// to XOR it int k_group_idx_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE ScaleBiasTileIterator() : pointer_(nullptr), byte_offset_(0), k_group_idx_(0) {} /// Constructor from TensorRef CUTLASS_DEVICE ScaleBiasTileIterator(TensorRef const &ref_scale_bias, int lane_id) : byte_offset_(0), k_group_idx_(0) { /// 16816 only pointer_ = reinterpret_cast<AccessType const *>(ref_scale_bias.data()) + ((lane_id >> 3) & 1) * Shape::kContiguous / kElementsPerAccess + (lane_id >> 4); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE ScaleBiasTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof_bits<Element>::value / 8; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE ScaleBiasTileIterator &add_tile_offset( TensorCoord const &tile_offset) { int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile; int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile; byte_offset_ += k_groups_delta * sizeof_bits<Element>::value * kElementsPerAccess * Policy::LdsmShape::kContiguous / 8; // Multiply by 2 because scale and bias belonging to the same stage are next // to each other in the shared memory. 
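// (Worked example for illustration only: with half_t elements, kElementsPerAccess is
// 128 / 16 = 8, so for Shape::kContiguous = 64 a one-tile advance skips
// 2 * 64 / 8 = 16 128-bit accesses -- one scale vector followed by one bias vector.)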
pointer_ += (2 * whole_tiles * Shape::kContiguous / kElementsPerAccess); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE ScaleBiasTileIterator &operator++() { byte_offset_ += Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * kElementsPerAccess / 8; k_group_idx_++; if (k_group_idx_ == (Policy::kGroupsPerTile / kPartitionsK)) { k_group_idx_ = 0; byte_offset_ -= (Policy::kGroupsPerTile / kPartitionsK) * Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * kElementsPerAccess / 8; add_tile_offset({Policy::kGroupsPerTile, 0}); } return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE ScaleBiasTileIterator &operator--() { assert(0); } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE ScaleBiasTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE ScaleBiasTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { Array<unsigned, 4> *fetch_ptr = reinterpret_cast<Array<unsigned, 4> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < 1; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsmIterations::kContiguous; AccessType const *source_ptr = pointer_ + Policy::LdsmShape::kContiguous * c; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; cutlass::arch::ldsm<layout::RowMajor, 4>( fetch_ptr[access_idx], source_byte_ptr); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * InstructionShape::kContiguous / kElementsPerAccess; byte_offset += sizeof_bits<AccessType>::value * pointer_offset / 8; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { k_group_idx_ = k_group % (Policy::kGroupsPerTile / kPartitionsK); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Policy of the details of LDSM shape and iterations typename Policy_, /// Number of partitions along K dimension int PartitionsK_> class ScaleBiasTileIterator<Shape_, Element_, cutlass::layout::RowMajor, InstructionShape_, Policy_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection using Policy = Policy_; /// Underlying tile iterator implementation using Base = ScaleBiasTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, layout::PitchLinearShape<InstructionShape::kColumn, InstructionShape::kRow>, Policy, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE ScaleBiasTileIterator() {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE ScaleBiasTileIterator(TensorRef const &ref_scale_bias, int lane_id) : iterator_({ref_scale_bias.data(), ref_scale_bias.stride()}, lane_id) {} /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE ScaleBiasTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical 
dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE ScaleBiasTileIterator &add_tile_offset( TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE ScaleBiasTileIterator &add_tile_offset_negative( TensorCoord const &tile_offset) { iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE ScaleBiasTileIterator &operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE ScaleBiasTileIterator &operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE ScaleBiasTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE ScaleBiasTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
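// Illustrative sketch (not part of this header): evaluates the fragment-size and
// byte-offset arithmetic used by ScaleBiasTileIterator above with concrete numbers.
// The policy values below (kLdsmOpInner, LdsmShape::kContiguous) and the instruction
// shape are hypothetical placeholders; real values come from the warp-level MMA policy
// that instantiates the iterator.

#include <cstdio>

int main() {
  int const kThreads = 32;                            // participating threads per warp
  int const kElementBits = 16;                        // e.g. half_t scale/bias elements
  int const kElementsPerAccess = 128 / kElementBits;  // elements per 128b vector access

  int const kLdsmOpInner = 8;              // hypothetical Policy::kLdsmOpInner
  int const kInstructionContiguous = 16;   // hypothetical InstructionShape::kContiguous
  int const kLdsmShapeContiguous = 4;      // hypothetical Policy::LdsmShape::kContiguous

  // Fragment holds 2 * kLdsmOpInner * InstructionShape::kContiguous / kThreads elements;
  // the factor of 2 presumably covers scale and bias stored next to each other in
  // shared memory, as noted in add_tile_offset above.
  int fragment_elements = 2 * kLdsmOpInner * kInstructionContiguous / kThreads;

  // operator++ advances byte_offset_ by LdsmShape::kContiguous 128-bit accesses.
  int bytes_per_increment = kLdsmShapeContiguous * kElementBits * kElementsPerAccess / 8;

  std::printf("fragment elements per thread: %d\n", fragment_elements);
  std::printf("bytes advanced per operator++: %d\n", bytes_per_increment);
  return 0;
}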
cutlass/include/cutlass/gemm/warp/scale_bias_tile_iterator.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/scale_bias_tile_iterator.h", "repo_id": "cutlass", "token_count": 6222 }
37
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/coord.h" #include "cutlass/matrix_coord.h" #include "cutlass/layout/pitch_linear.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace layout { //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). /// This one is the base class of all Ampere/Turing fp16/bf16/int8/int4/int1 /// tensor core kernels. tf32 TN uses this too. template <int ElementSize, int Crosswise> struct TensorOpMultiplicand { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Static constants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; static int const kCrosswise = Crosswise; /// Contiguous dimension of the tile shape matches one shared memory cache /// line - 128B. For 128bit access size, it equals to 8 accesses. 
static int const kTileShapeContiguous = 128 / (kAccessSize / 8); /// Number of kblocks to store PartitionShape::kContiguous Elements static int const kFactor = kTileShapeContiguous * kElementsPerAccess / kCrosswise; static_assert( (kFactor > 0), "kCrosswise should be no large than one shared memory cache line."); /// The strided dimension needs to be at least (WarpSize(32) / /// kTileShapeContiguous) for a warp to access. To ensure conflict free /// access, it also needs to be at least (kTileShapeContiguous / kFactor). /// See comments below static int const kTileShapeStride = ((kTileShapeContiguous / kFactor) > (32 / kTileShapeContiguous)) ? (kTileShapeContiguous / kFactor) : (32 / kTileShapeContiguous); /// Fundamental tile shape in units of vectors to guarantee bank conflict free /// shared memory load/store. /// For kFactor = 1, TileShape = <8, 8> /// For kFactor > 1, TileShape = <8, 4> using TileShape = PitchLinearShape<kTileShapeContiguous, kTileShapeStride>; /// Fundamental partition shape in units of vectors using PartitionShape = PitchLinearShape<4, 4>; using PartitionCount = PitchLinearShape<TileShape::kContiguous / PartitionShape::kContiguous, TileShape::kStrided / PartitionShape::kStrided>; using AccessCount = PitchLinearShape<PartitionShape::kContiguous, PartitionShape::kStrided>; private: // // Data members // /// Stride data member. For GEMM, it equals to kCrosswise x stage. Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicand(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicand(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicand packed(TensorCoord const &extent) { return TensorOpMultiplicand(extent[0]); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { // // First, compute c and s of vector within source (in units of vector // accesses) // int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; int vec_strided_idx = coord.strided() / kFactor; // Compute the fundamental tile being accessed int tile_contiguous_idx = vec_contiguous_idx / (TileShape::kContiguous / kFactor); int tile_contiguous_residual = vec_contiguous_idx % (TileShape::kContiguous / kFactor) + ((coord.strided() % kFactor) * (TileShape::kContiguous / kFactor)); int tile_strided_residual = vec_strided_idx % TileShape::kStrided; // Compute the 'partition' within the fundamental tile int partition_contiguous_idx = tile_contiguous_residual / PartitionShape::kContiguous; int partition_strided_idx = tile_strided_residual / PartitionShape::kStrided; int partition_contiguous_residual = tile_contiguous_residual % PartitionShape::kContiguous; int partition_strided_residual = tile_strided_residual % PartitionShape::kStrided; // // Then swizzle // int permuted_vec_contiguous_within_partition = partition_contiguous_residual ^ (partition_strided_residual % 4); int permuted_partition_contiguous_within_tile = partition_contiguous_idx ^ (partition_strided_idx % 2); // // Compute final element location // int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous + permuted_partition_contiguous_within_tile * PartitionShape::kContiguous + permuted_vec_contiguous_within_partition) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess); int element_strided = vec_strided_idx; return element_contiguous + element_strided * stride_[0] * kFactor; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). 
template <int ElementSize, int Crosswise> struct TensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicand<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; static int const kCrosswise = Base::kCrosswise; static int const kFactor = Base::kFactor; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return TensorOpMultiplicandCongruous(extent[0]); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(coord); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return coord; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(extent); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). /// This one is just for TF32 NT kernel. 
template <int Crosswise> struct TensorOpMultiplicandCongruous<32, Crosswise> { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; /// Fundamental tile shape in units of vectors using TileShape = PitchLinearShape<8, 4>; /// Partitionshape is the same as TileShape for this layout using PartitionShape = PitchLinearShape<8, 4>; using PartitionCount = PitchLinearShape<TileShape::kContiguous / PartitionShape::kContiguous, TileShape::kStrided / PartitionShape::kStrided>; using AccessCount = PitchLinearShape<PartitionShape::kContiguous, PartitionShape::kStrided>; // // Static constants // static int const kElementSize = 32; static int const kElementsPerAccess = kAccessSize / kElementSize; static int const kCrosswise = Crosswise; static int const kFactor = 1; private: // // Data members // /// Stride data member. Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return TensorOpMultiplicandCongruous(extent[0]); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { int tc = coord.contiguous() / 32; int ts = coord.strided() / 4; int c = (coord.contiguous() % 32) / kElementsPerAccess; int s = coord.strided() % 4; LongIndex offset = (c ^ (2 * s)) * kElementsPerAccess + s * stride_[0] + tc * 32 + ts * stride_[0] * 4 + coord.contiguous() % 4; return offset; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to /// TensorOpMultiplicand template <int ElementSize, int Crosswise> struct ColumnMajorTensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCongruous<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int 
const kElementsPerAccess = Base::kElementsPerAccess; static int const kCrosswise = Base::kCrosswise; static int const kFactor = Base::kFactor; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return ColumnMajorTensorOpMultiplicandCongruous(extent.row()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a row-major view of pitch-linear memory to /// TensorOpMultiplicand template <int ElementSize, int Crosswise> struct RowMajorTensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCongruous<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; static int const kCrosswise = Base::kCrosswise; static int const kFactor = Base::kFactor; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return RowMajorTensorOpMultiplicandCongruous(extent.column()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). template <int ElementSize, int Crosswise> struct TensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicand<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; static int const kCrosswise = Base::kCrosswise; static int const kFactor = Base::kFactor; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCrosswise packed(TensorCoord const &extent) { return TensorOpMultiplicandCrosswise(extent[0]); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(coord); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return coord; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(extent); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to /// TensorOpMultiplicandCrosswise template <int ElementSize, int Crosswise> struct ColumnMajorTensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCrosswise<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorTensorOpMultiplicandCrosswise packed( TensorCoord const &extent) { return ColumnMajorTensorOpMultiplicandCrosswise(extent.row()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a row-major view of pitch-linear memory to /// TensorOpMultiplicandCrosswise template <int ElementSize, int Crosswise> struct RowMajorTensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCrosswise<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorTensorOpMultiplicandCrosswise packed( TensorCoord const &extent) { return RowMajorTensorOpMultiplicandCrosswise(extent.column()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear memory. template <int ElementSize, int InterleavedK> struct TensorOpMultiplicandColumnMajorInterleaved { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; // // Static constants // static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; //static int const kThreadBlockStrided = ThreadBlockStrided; static int const kInterleavedK = InterleavedK; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandColumnMajorInterleaved(Index ldm = 0): stride_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandColumnMajorInterleaved(Stride stride): stride_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandColumnMajorInterleaved packed(TensorCoord const &extent) { return TensorOpMultiplicandColumnMajorInterleaved(extent[0] * kInterleavedK); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { int const rows_per_smem_cache_line = 128 / kInterleavedK; int row_id = coord.strided() / rows_per_smem_cache_line; int col_id = (coord.strided() % rows_per_smem_cache_line) * kInterleavedK + coord.contiguous(); int access_block_id = col_id >> 4; int swizzle_access_block_id = access_block_id ^ (row_id & 1); int swizzle_col_id = swizzle_access_block_id << 4; return row_id * 128 + swizzle_col_id; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return (extent[1] / kInterleavedK) * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear memory. template <int ElementSize, int InterleavedK> struct TensorOpMultiplicandRowMajorInterleaved { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; // // Static constants // static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; //static int const kThreadBlockStrided = ThreadBlockStrided; static int const kInterleavedK = InterleavedK; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandRowMajorInterleaved(Index ldm = 0): stride_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandRowMajorInterleaved(Stride stride): stride_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandRowMajorInterleaved packed(TensorCoord const &extent) { return TensorOpMultiplicandRowMajorInterleaved(extent[1] * kInterleavedK); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { int const rows_per_smem_cache_line = 128 / kInterleavedK; int row_id = coord.strided() / rows_per_smem_cache_line; int col_id = (coord.strided() % rows_per_smem_cache_line) * kInterleavedK + coord.contiguous(); int access_block_id = col_id >> 4; int swizzle_access_block_id = access_block_id ^ (row_id & 1); int swizzle_col_id = swizzle_access_block_id << 4; return row_id * 128 + swizzle_col_id; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return (extent[0] / kInterleavedK) * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace layout } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
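// Illustrative host-side sketch (not part of this header): prints the swizzled linear
// offsets produced by TensorOpMultiplicand so the XOR-based permutation above can be
// inspected. The element size (16 bits) and crosswise extent (64 elements) are example
// values only, and a CUTLASS include path is assumed to be available.

#include <cstdio>
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"

int main() {
  // 16-bit elements, 64-element crosswise dimension => kElementsPerAccess = 8, kFactor = 1
  using Layout = cutlass::layout::TensorOpMultiplicand<16, 64>;

  // Tightly packed tensor with 64 contiguous elements and 8 strided elements
  Layout layout = Layout::packed(Layout::TensorCoord(64, 8));

  for (int s = 0; s < 2; ++s) {
    for (int c = 0; c < 64; c += 8) {   // step one 128-bit access (8 elements) at a time
      long long offset = layout(Layout::TensorCoord(c, s));
      std::printf("(c=%2d, s=%d) -> offset %lld\n", c, s, offset);
    }
  }
  return 0;
}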
cutlass/include/cutlass/layout/tensor_op_multiplicand_sm75.h/0
{ "file_path": "cutlass/include/cutlass/layout/tensor_op_multiplicand_sm75.h", "repo_id": "cutlass", "token_count": 10607 }
38
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Kernel performing a reduction over densely packed tensors in global memory */ #pragma once #include "cutlass/device_kernel.h" #include "cutlass/reduction/kernel/reduce_split_k.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace reduction { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ReductionKernel_ > class ReduceSplitK { public: using ReductionKernel = ReductionKernel_; using Shape = typename ReductionKernel::Shape; using ReductionOp = typename ReductionKernel::ReductionOp; using OutputOp = typename ReductionKernel::OutputOp; using ElementWorkspace = typename ReductionKernel::ElementWorkspace; using ElementAccumulator = typename ReductionKernel::ElementAccumulator; using ElementOutput = typename ReductionKernel::ElementOutput; using WorkspaceTensorRef = typename ReductionKernel::WorkspaceTensorRef; using OutputTensorRef = typename ReductionKernel::OutputTensorRef; using StrideIndex = typename ReductionKernel::StrideIndex; /// Argument structure struct Arguments { // // Data members // MatrixCoord problem_size{0,0}; int partitions{1}; size_t partition_stride{0}; WorkspaceTensorRef workspace{}; OutputTensorRef destination{}; OutputTensorRef source{}; typename OutputOp::Params output{}; typename ReductionOp::Params reduction{}; // // Methods // /// Default ctor Arguments() = default; CUTLASS_HOST_DEVICE Arguments( MatrixCoord const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( MatrixCoord problem_size_, int partitions_, size_t partition_stride_, WorkspaceTensorRef workspace_, OutputTensorRef destination_, OutputTensorRef source_, typename OutputOp::Params output_ = typename OutputOp::Params(), typename ReductionOp::Params reduction_ = typename ReductionOp::Params() ): problem_size(problem_size_), partitions(partitions_), partition_stride(partition_stride_), workspace(workspace_), destination(destination_), source(source_), output(output_), reduction(reduction_) { } }; private: /// Kernel parameters object typename ReductionKernel::Params params_; public: /// Constructs Reduction SplitK ReduceSplitK() { } /// Determines whether the ReduceSplitK can execute the given problem. static Status can_implement(Arguments const &args) { return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { // needs no additional workspace return 0; } /// Initializes Reduction state from arguments. Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // initialize the params structure from the arguments params_ = typename ReductionKernel::Params( args.problem_size, args.partitions, args.partition_stride, args.workspace, args.destination, args.source, args.output, args.reduction ); return Status::kSuccess; } /// Initializes Reduction kernel state from arguments. Status update(Arguments const &args, void *workspace = nullptr) { // update the params structure from the arguments params_.workspace.reset(args.workspace.non_const_ref().data()); params_.destination.reset(args.destination.non_const_ref().data()); params_.source.reset(args.source.non_const_ref().data()); params_.output = args.output; params_.reduction = args.reduction; return Status::kSuccess; } /// Runs the kernel using initialized state. 
Status run(cudaStream_t stream = nullptr) { // // Launch reduction kernel // dim3 block = ReductionKernel::block_shape(); dim3 grid = ReductionKernel::grid_shape(params_.problem_size); Kernel<ReductionKernel><<< grid, block, 0, stream >>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace reduction } // namespace cutlass
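// Hedged usage sketch (not part of this header): shows one way to assemble a concrete
// ReduceSplitK reduction for accumulating split-K partial results, following the type
// plumbing used by the CUTLASS split-K GEMM examples. The element types, the CTA shape
// MatrixShape<4, 32 * EpilogueOp::kCount>, and the caller-provided TensorRefs are
// assumptions/placeholders rather than the only valid configuration.

#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/device/reduce_split_k.h"

using ElementOutput      = cutlass::half_t;
using ElementAccumulator = float;

// Epilogue applied while reducing: D = alpha * sum(partials) + beta * C
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
    ElementOutput,
    128 / cutlass::sizeof_bits<ElementOutput>::value,
    ElementAccumulator,
    ElementAccumulator>;

// Element-wise addition across split-K partitions
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
    ElementAccumulator, ElementAccumulator, EpilogueOp::kCount>;

using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
    cutlass::MatrixShape<4, 32 * EpilogueOp::kCount>,
    EpilogueOp,
    ReductionOp>;

using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>;

/// Reduces 'partitions' partial results held in 'workspace' into 'destination'.
cutlass::Status reduce_partials(
    cutlass::MatrixCoord problem_size,
    int partitions,
    size_t partition_stride,
    ReductionDevice::WorkspaceTensorRef workspace,
    ReductionDevice::OutputTensorRef destination,
    ReductionDevice::OutputTensorRef source,
    ElementAccumulator alpha,
    ElementAccumulator beta,
    cudaStream_t stream = nullptr) {

  ReductionDevice reduction;

  ReductionDevice::Arguments args(
      problem_size,
      partitions,
      partition_stride,
      workspace,
      destination,
      source,
      {alpha, beta});

  cutlass::Status status = reduction.initialize(args, nullptr, stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  return reduction.run(stream);
}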
cutlass/include/cutlass/reduction/device/reduce_split_k.h/0
{ "file_path": "cutlass/include/cutlass/reduction/device/reduce_split_k.h", "repo_id": "cutlass", "token_count": 2047 }
39
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a structure containing strides, bounds, and a pointer to tensor data. */ #pragma once #include <cstdint> #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/tensor_ref.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element_> struct PlanarComplexReference { // // Type definitions // using Element = Element_; using ComplexElement = complex<Element>; // // Data members // Element *real; Element *imag; // // Methods // CUTLASS_HOST_DEVICE PlanarComplexReference( Element *real_ = nullptr, Element *imag_ = nullptr ): real(real_), imag(imag_) { } /// Loads the complex element CUTLASS_HOST_DEVICE operator complex<Element>() const { return complex<Element>{*real, *imag}; } /// Stores a complex element to the location pointed to by the reference CUTLASS_HOST_DEVICE PlanarComplexReference &operator=(complex<Element> const &rhs) { *real = rhs.real(); *imag = rhs.imag(); return *this; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /* \brief TensorRef is a template for objects pointing to the start of tensors of arbitrary rank and layout within memory. 
A TensorRef combines a pointer and a Layout concept */ template < /// Data type of element stored within tensor (concept: NumericType) typename Element_, /// Defines a mapping from logical coordinate to linear memory (concept: Layout) typename Layout_ > class TensorRefPlanarComplex { public: /// Data type of individual access using Element = Element_; /// Complex element type using ComplexElement = complex<Element>; /// Mapping function from logical coordinate to linear memory using Layout = Layout_; static_assert(sizeof_bits<Element>::value >= 8, "Planar complex not suitable for subbyte elements at this time"); /// Reference type to an element using Reference = PlanarComplexReference<Element>; /// Logical rank of tensor index space static int const kRank = Layout::kRank; /// Index type using Index = typename Layout::Index; /// Long index used for pointer offsets using LongIndex = typename Layout::LongIndex; /// Coordinate in logical tensor space using TensorCoord = typename Layout::TensorCoord; /// Layout's stride vector using Stride = typename Layout::Stride; /// TensorRef to constant data using ConstTensorRef = TensorRefPlanarComplex< typename platform::remove_const<Element>::type const, Layout>; /// TensorRef to non-constant data using NonConstTensorRef = TensorRefPlanarComplex< typename platform::remove_const<Element>::type, Layout>; /// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a /// scalar, but degenerate cases such as these are difficult to accommodate without /// extensive C++ metaprogramming or support for zero-length arrays. static_assert(kRank > 0, "Cannot define a zero-rank TensorRef"); private: /// Pointer Element* ptr_; /// Layout object maps logical coordinates to linear offsets Layout layout_; /// Offset to imaginary part LongIndex imaginary_stride_; public: // // Methods // /// Constructs a TensorRef with a pointer and layout object. CUTLASS_HOST_DEVICE TensorRefPlanarComplex( Element *ptr = nullptr, ///< pointer to start of tensor Layout const &layout = Layout(), ///< layout object containing stride and mapping function LongIndex imaginary_stride = 0 ): ptr_(ptr), layout_(layout), imaginary_stride_(imaginary_stride) { } /// Converting constructor from TensorRef to non-constant data. CUTLASS_HOST_DEVICE TensorRefPlanarComplex( NonConstTensorRef const &ref ///< TensorRef to non-const data ): ptr_(ref.data()), layout_(ref.layout()), imaginary_stride_(ref.imaginary_stride_) { } /// Returns a reference to constant-valued tensor. 
CUTLASS_HOST_DEVICE ConstTensorRef const_ref() const { return ConstTensorRef(ptr_, layout_, imaginary_stride_); } CUTLASS_HOST_DEVICE NonConstTensorRef non_const_ref() const { return NonConstTensorRef( const_cast<typename platform::remove_const<Element>::type *>(ptr_), layout_, imaginary_stride_); } /// Updates only the pointer CUTLASS_HOST_DEVICE void reset(Element* ptr = nullptr, LongIndex imaginary_stride = 0) { ptr_ = ptr; imaginary_stride_ = imaginary_stride; } /// Updates the pointer and layout object CUTLASS_HOST_DEVICE void reset(Element* ptr, Layout const &layout, LongIndex imaginary_stride) { ptr_ = ptr; layout_ = layout; imaginary_stride_ = imaginary_stride; } /// Returns true if the TensorRef is non-null CUTLASS_HOST_DEVICE bool good() const { return ptr_ != nullptr; } /// Returns the pointer to referenced data CUTLASS_HOST_DEVICE Element * data() const { return ptr_; } /// Returns the pointer to referenced data CUTLASS_HOST_DEVICE Element * imaginary_data() const { return ptr_ + imaginary_stride_; } /// Returns a reference to the element at a given linear index CUTLASS_HOST_DEVICE Reference data(LongIndex idx) const { return Reference(ptr_ + idx, ptr_ + idx + imaginary_stride_); } /// Returns the layout object CUTLASS_HOST_DEVICE Layout & layout() { return layout_; } /// Returns the layout object CUTLASS_HOST_DEVICE Layout layout() const { return layout_; } /// Gets the stride to an imaginary element LongIndex imaginary_stride() const { return imaginary_stride_; } /// Gets the stride to an imaginary element LongIndex &imaginary_stride() { return imaginary_stride_; } /// Returns the layout object's stride vector CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the layout object's stride vector CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Returns the layout object's stride in a given physical dimension CUTLASS_HOST_DEVICE Index stride(int dim) const { return layout_.stride().at(dim); } /// Returns the layout object's stride in a given physical dimension CUTLASS_HOST_DEVICE Index & stride(int dim) { return layout_.stride().at(dim); } /// Computes the offset of an index from the origin of the tensor CUTLASS_HOST_DEVICE LongIndex offset(TensorCoord const& coord) const { return layout_(coord); } /// Returns a reference to the element at a given Coord CUTLASS_HOST_DEVICE Reference at(TensorCoord const& coord) const { return data(offset(coord)); } /// Returns a reference to the element at a given Coord CUTLASS_HOST_DEVICE Reference operator[](TensorCoord const& coord) const { return data(offset(coord)); } /// Adds an offset to each pointer CUTLASS_HOST_DEVICE TensorRefPlanarComplex & add_pointer_offset(LongIndex offset_) { ptr_ += offset_; return *this; } /// Adds an offset to each pointer CUTLASS_HOST_DEVICE TensorRefPlanarComplex & add_coord_offset(TensorCoord const &coord) { add_pointer_offset(offset(coord)); return *this; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRefPlanarComplex operator+(TensorCoord const& b) const { TensorRefPlanarComplex result(*this); result.add_coord_offset(b); return result; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRefPlanarComplex & operator+=(TensorCoord const& b) { add_coord_offset(b); return *this; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRefPlanarComplex operator-(TensorCoord const& b) const { TensorRefPlanarComplex result(*this); result.add_pointer_offset(-offset(b)); return result; } 
/// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRefPlanarComplex & operator-=(TensorCoord const& b) { add_pointer_offset(-offset(b)); return *this; } /// TensorRef to real-valued tensor CUTLASS_HOST_DEVICE cutlass::TensorRef<Element, Layout> ref_real() const { return cutlass::TensorRef<Element, Layout>(data(), layout()); } /// TensorRef to imaginary-valued tensor CUTLASS_HOST_DEVICE cutlass::TensorRef<Element, Layout> ref_imag() const { return cutlass::TensorRef<Element, Layout>(imaginary_data(), layout()); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Constructs a TensorRef, deducing types from arguments. template < typename Element, typename Layout > CUTLASS_HOST_DEVICE TensorRefPlanarComplex<Element, Layout> make_TensorRefPlanarComplex( Element *ptr, Layout const &layout, int64_t imaginary_stride) { return TensorRefPlanarComplex<Element, Layout>(ptr, layout, imaginary_stride); } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
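The planar-complex reference above stores one pointer plus an `imaginary_stride`, so the real and imaginary planes can live in a single allocation. Below is a minimal host-side sketch of how it might be exercised, assuming the usual CUTLASS include layout and a build against the CUTLASS `include/` directory; the `rows`/`cols` sizes and the packed column-major layout are our own choices, not anything prescribed by the header.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref_planar_complex.h"

int main() {
  int const rows = 4;
  int const cols = 8;

  // One allocation: the real plane followed by the imaginary plane.
  std::vector<float> storage(2 * rows * cols, 0.0f);
  int64_t const imaginary_stride = rows * cols;

  cutlass::layout::ColumnMajor layout =
      cutlass::layout::ColumnMajor::packed({rows, cols});

  auto ref = cutlass::make_TensorRefPlanarComplex(storage.data(), layout, imaginary_stride);

  // ref_real() / ref_imag() view the two planes as ordinary TensorRefs.
  ref.ref_real().at({1, 2}) = 3.0f;
  ref.ref_imag().at({1, 2}) = -1.0f;

  // offset() maps the logical coordinate to a linear index; the imaginary
  // element lives imaginary_stride elements later in the same allocation.
  int64_t const idx = ref.offset({1, 2});
  std::cout << "real: " << storage[idx]
            << "  imag: " << storage[idx + imaginary_stride] << "\n";
  return 0;
}
```

Note that `ref_real()` and `ref_imag()` rebind the same `Layout` object over the two planes, which is why a single layout suffices for both halves of the tensor.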
cutlass/include/cutlass/tensor_ref_planar_complex.h/0
{ "file_path": "cutlass/include/cutlass/tensor_ref_planar_complex.h", "repo_id": "cutlass", "token_count": 3414 }
40
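Because the real and imaginary parts of one logical element sit `imaginary_stride` elements apart, `at()` in the file above cannot return a plain lvalue; it returns a `PlanarComplexReference` built from two pointers (see `Reference data(LongIndex idx)`). The sketch below models that proxy-reference idea with a hypothetical `PlanarRef` type of our own — it is not the actual CUTLASS class, only an illustration of the pattern.

```cpp
#include <complex>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a planar-complex reference: two pointers into the
// real and imaginary planes of the same logical element.
struct PlanarRef {
  float *real_ptr;
  float *imag_ptr;

  // Read the pair as an ordinary std::complex value.
  operator std::complex<float>() const { return {*real_ptr, *imag_ptr}; }

  // Write both planes from a std::complex value.
  PlanarRef &operator=(std::complex<float> const &z) {
    *real_ptr = z.real();
    *imag_ptr = z.imag();
    return *this;
  }
};

int main() {
  int const n = 8;
  std::vector<float> storage(2 * n, 0.0f);  // real plane, then imaginary plane
  long const imaginary_stride = n;

  // Mirror Reference data(idx): pair the element at idx with its imaginary twin.
  auto at = [&](long idx) {
    return PlanarRef{storage.data() + idx, storage.data() + idx + imaginary_stride};
  };

  at(3) = std::complex<float>(1.5f, -2.5f);
  std::complex<float> z = at(3);
  std::printf("storage[3]=%g storage[3+stride]=%g |z|^2=%g\n",
              storage[3], storage[3 + n], std::norm(z));
  return 0;
}
```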
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates calculating the address and predicates to the load of tiles from pitch-linear rank=2 tensors. This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile first, with the objective of minimizing predicate mask updates during steady-state operation. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. */ #pragma once #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedTileAccessIterator2dThreadTile /// template <typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, typename AccessType> class PredicatedTileAccessIterator2dThreadTile; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. 
/// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; static int const kPredicatesPerByte = 4; static int const kPredicatesPerWord = 4 * kPredicatesPerByte; /// Number of 32b words containing predicates static int const kPredicateByteCount = (ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kStrided + kPredicatesPerByte - 1) / kPredicatesPerByte; static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; static_assert(kPredicateWordCount <= 4, "Too many predicates."); /// Predicate vector stores mask to guard accesses using Mask = Array<uint32_t, kPredicateWordCount>; /// Uses a non-template class struct Params : PredicatedTileAccessIteratorParams { public: friend PredicatedTileAccessIterator2dThreadTile; using Base = PredicatedTileAccessIteratorParams; // Default ctor CUTLASS_HOST_DEVICE Params() { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : Base(layout.stride(0), MakePredicatedTileAccessIteratorDesc<Shape, Element, Layout, kAdvanceRank, ThreadMap>()() ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; private: // // Data members // /// Parameters object with precomputed internal state Params const &params_; /// Internal pointer to first access of tile BytePointer pointer_; /// Guard predicates uint32_t predicates_[kPredicateWordCount]; /// Size of tensor TensorCoord extent_; /// Initial offset for each thread TensorCoord thread_offset_; /// Index of residue tile int residue_tile_idx_; /// Used for out-of-order visitation bool is_residue_tile_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; /// Tracks iterations within the thread loop int iteration_thread_; private: /// Computes predicates based on internally tracked per-thread offset. 
CUTLASS_HOST_DEVICE void compute_predicates_( /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0u; } CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++) { TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous, ts + s * ThreadMap::Delta::kStrided); TensorCoord coord = thread_offset_ + iteration_coord; bool guard; if (is_steady_state) { if (kAdvanceRank == 0) { guard = (coord.strided() < extent_.strided()); } else { guard = (coord.contiguous() < extent_.contiguous()); } } else { guard = (coord.strided() < extent_.strided() && coord.contiguous() < extent_.contiguous()); } int pred_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided; int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); } } } } public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), extent_(extent), is_residue_tile_(true) { TensorCoord residue_offset; if (kAdvanceRank) { residue_tile_idx_ = (extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) / Shape::kStrided; residue_offset = make_Coord(0, residue_tile_idx_ * Shape::kStrided); } else { residue_tile_idx_ = (extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) / Shape::kContiguous; residue_offset = make_Coord(residue_tile_idx_ * Shape::kContiguous, 0); } // Per-thread offset in logical coordinates of tensor thread_offset_ = threadblock_offset + residue_offset + ThreadMap::initial_offset(thread_id); // update internal pointers Layout layout(params_.stride_); add_pointer_offset(layout(thread_offset_)); compute_predicates_(false); set_iteration_index(0); } /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id) : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { int residual = index % (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided); iteration_strided_ = index / (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided); iteration_contiguous_ = residual / ThreadMap::ThreadAccessShape::kStrided; iteration_thread_ = residual % ThreadMap::ThreadAccessShape::kStrided; } /// Adds a pointer 
offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += int(sizeof(Element)) * pointer_offset; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE void add_tile_offset( TensorCoord const &tile_offset) { if (is_residue_tile_) { TensorCoord residue_offset; if (kAdvanceRank) { residue_offset = TensorCoord(0, residue_tile_idx_ * Shape::kStrided); } else { residue_offset = TensorCoord(residue_tile_idx_ * Shape::kContiguous, 0); } thread_offset_ -= residue_offset; Layout layout(params_.stride_); add_pointer_offset(-layout(residue_offset)); compute_predicates_(true); if (kAdvanceRank) { pointer_ += params_.inc_advance_ * (tile_offset.strided() - 1); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * (tile_offset.contiguous() - 1); pointer_ += Shape::kStrided * tile_offset.strided(); } } else { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * tile_offset.strided(); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * tile_offset.contiguous(); pointer_ += Shape::kStrided * tile_offset.strided(); } } is_residue_tile_ = false; } CUTLASS_HOST_DEVICE AccessType *get() const { AccessType *ret_val = reinterpret_cast<AccessType *>( pointer_ + (iteration_thread_ * params_.stride_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous) * int(sizeof(Element))); return ret_val; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile &operator++() { iteration_thread_++; if (iteration_thread_ < ThreadMap::ThreadAccessShape::kStrided) return *this; iteration_thread_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) return *this; // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { pointer_ += params_.inc_strided_; return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. iteration_strided_ = 0; // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, this // subtraction as well as the subsequent integer addition are both elided by // the compiler. pointer_ -= params_.inc_advance_; return *this; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile operator++(int) { PredicatedTileAccessIterator2dThreadTile self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = enable ? 
0u : predicates_[i]; } } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0xffffffff; } } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = mask[i]; } } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { mask[i] = predicates_[i]; } } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { int pred_idx = iteration_thread_ + iteration_contiguous_ * ThreadMap::ThreadAccessShape::kStrided + iteration_strided_ * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided; int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; return pred; } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator2dThreadTile; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())) {} /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile operator++(int) { PredicatedTileAccessIterator2dThreadTile self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator2dThreadTile; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile operator++(int) { PredicatedTileAccessIterator2dThreadTile self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
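The iterator above packs its guard bits four to a byte (only the low nibble of each byte is used) and sixteen to a 32-bit word, using the `pred_idx -> word/byte/bit` arithmetic that appears in both `compute_predicates_` and `valid()`. The standalone sketch below reproduces that arithmetic outside of CUTLASS so it can be stepped through on the host; all names are ours.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr int kPredicatesPerByte = 4;
constexpr int kPredicatesPerWord = 4 * kPredicatesPerByte;  // 16 per 32-bit word

// Set one guard bit, mirroring compute_predicates_ in the iterator above.
void set_predicate(std::vector<uint32_t> &words, int pred_idx, bool guard) {
  int word_idx = pred_idx / kPredicatesPerWord;
  int residual = pred_idx % kPredicatesPerWord;
  int byte_idx = residual / kPredicatesPerByte;
  int bit_idx  = residual % kPredicatesPerByte;
  words[word_idx] |= (uint32_t(guard) << (byte_idx * 8 + bit_idx));
}

// Test one guard bit, mirroring valid() in the iterator above.
bool get_predicate(std::vector<uint32_t> const &words, int pred_idx) {
  int word_idx = pred_idx / kPredicatesPerWord;
  int residual = pred_idx % kPredicatesPerWord;
  int byte_idx = residual / kPredicatesPerByte;
  int bit_idx  = residual % kPredicatesPerByte;
  return (words[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
}

int main() {
  int num_accesses = 24;  // e.g. Iterations::kCount * ThreadAccessShape::kStrided
  int num_words = (num_accesses + kPredicatesPerWord - 1) / kPredicatesPerWord;
  std::vector<uint32_t> words(num_words, 0u);

  // Guard only the first 20 accesses, as an out-of-bounds mask might.
  for (int i = 0; i < num_accesses; ++i) {
    set_predicate(words, i, i < 20);
  }
  for (int i = 0; i < num_accesses; ++i) {
    std::printf("access %2d -> %s\n", i, get_predicate(words, i) ? "valid" : "masked");
  }
  return 0;
}
```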
cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h/0
{ "file_path": "cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h", "repo_id": "cutlass", "token_count": 9733 }
41
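The header above advertises that it visits the last "residue" tile first so that the predicate mask only has to change once before the steady state. The following sketch shows the same idea in one dimension under assumptions of our own choosing (extent 23, tile 8, zero threadblock offset): the single partial tile is handled with a guard up front, after which every remaining tile is full and needs no per-element bounds check.

```cpp
#include <cstdio>
#include <vector>

int main() {
  int const extent = 23;  // logical problem size along the advance dimension
  int const tile = 8;     // tile shape along that dimension
  std::vector<int> data(extent, 1);

  // Index of the last (possibly partial) tile, mirroring
  // residue_tile_idx_ = (extent - offset - 1) / Shape with a zero offset.
  int residue_tile_idx = (extent - 1) / tile;

  long long sum = 0;

  // Residue tile first: this is the only place a guard is needed.
  for (int i = residue_tile_idx * tile; i < extent; ++i) {
    sum += data[i];
  }

  // Steady state: every remaining tile is full, so no per-element guard.
  for (int t = 0; t < residue_tile_idx; ++t) {
    for (int i = 0; i < tile; ++i) {
      sum += data[t * tile + i];
    }
  }

  std::printf("sum = %lld (expected %d)\n", sum, extent);
  return 0;
}
```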
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing storing of tiles from pitch-linear rank=2 tensors. */ #pragma once #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for congruous arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator< Shape_, Element_, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert(AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Internal details made public to facilitate introspection struct Detail { /// This iterator is specialized for an access size that is 128 bits in length. 
static int const kAccessSizeInBits = 128; static_assert( sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, "This iterator requires a policy whose access size is 128bs"); }; private: /// Element type per access using AccessType = Array<Element, Layout::kElementsPerAccess>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>; /// Underlying iterator to compute the addresses using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout, kAdvanceRank, ThreadMap>; private: // // Data members // /// Data member to the tile access iterator TileAccessIterator address_iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : address_iterator_(ref, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { address_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { address_iterator_.add_tile_offset({0, 1}); return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { address_iterator_.add_tile_offset(coord); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, Index byte_offset) { address_iterator_.set_iteration_index(0); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset; AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr); frag_ptr[access_idx] = *access_ptr; ++address_iterator_; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8); } CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, Index byte_offset) { address_iterator_.set_iteration_index(0); AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset; AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr); *access_ptr = frag_ptr[access_idx]; ++address_iterator_; } } } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } }; 
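The `Detail::kAccessSizeInBits = 128` constraint above ties the thread map to 128-bit vectorized accesses. Below is a small compile-time sketch of that relationship for a few byte-multiple element widths; CUTLASS's `sizeof_bits` also covers sub-byte types, which this simplified check deliberately does not.

```cpp
#include <cstdint>

// Each instantiation checks that ElementsPerAccess * element width == 128 bits,
// echoing the static_assert in the iterator above (byte-multiple types only).
template <typename Element, int ElementsPerAccess>
struct AccessIs128b {
  static constexpr int value_bits = int(sizeof(Element)) * 8 * ElementsPerAccess;
  static_assert(value_bits == 128, "This thread map does not produce 128-bit accesses");
};

// 16-bit elements need 8 per access, 32-bit need 4, 8-bit need 16.
template struct AccessIs128b<uint16_t, 8>;
template struct AccessIs128b<float, 4>;
template struct AccessIs128b<int8_t, 16>;

int main() { return 0; }
```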
//////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for column-major congruous TensorOp formats. /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator< Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert(AdvanceRank == 0 || AdvanceRank == 1, "Specialization for column-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator( TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ): iterator_({ref.data(), ref.stride()}, thread_id) { } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for row-major congruous TensorOp formats. 
/// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator< Shape_, Element_, layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert(AdvanceRank == 0 || AdvanceRank == 1, "Specialization for row-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator( TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ): iterator_({ref.data(), ref.stride()}, thread_id) { } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. 
CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for crosswise arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator<Shape_, Element_, layout::TensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Internal details made public to facilitate introspection struct Detail { /// This iterator is specialized for an access size that is 128 bits in /// length. static int const kAccessSizeInBits = 128; static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, "This iterator requires a policy whose access size is 128bs"); }; private: /// Element type per access using AccessType = Array<Element, Layout::kElementsPerAccess>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>; /// Underlying iterator to compute the addresses using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout, kAdvanceRank, ThreadMap>; private: // // Data members // /// Data member to the tile access iterator TileAccessIterator address_iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : address_iterator_(ref, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { address_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { address_iterator_.add_tile_offset({1, 0}); return *this; } /// Advances to the next tile in memory. 
CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { address_iterator_.add_tile_offset(coord); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { address_iterator_.set_iteration_index(0); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset); ++address_iterator_; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8); } CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, Index byte_offset) { address_iterator_.set_iteration_index(0); AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset; AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr); *access_ptr = frag_ptr[access_idx]; ++address_iterator_; } } } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for column-major crosswise TensorOp formats. /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator<Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for column-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap_>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for row-major crosswise TensorOp formats. /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator<Shape_, Element_, layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for row-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap_>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for k interleaved arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int InterleavedK, int Alignment> class RegularTileIterator< Shape_, Element_, layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Internal details made public to facilitate introspection struct Detail { /// This iterator is specialized for an access size that is 128 bits in /// length. 
static int const kAccessSizeInBits = 128; static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, "This iterator requires a policy whose access size is 128bs"); }; private: /// Element type per access using AccessType = Array<Element, Layout::kElementsPerAccess>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>; /// Underlying iterator to compute the addresses using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout, kAdvanceRank, ThreadMap>; private: // // Data members // /// Data member to the tile access iterator TileAccessIterator address_iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : address_iterator_(ref, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { address_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { address_iterator_.add_pointer_offset(Shape::kCount); return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { address_iterator_.add_pointer_offset(coord.contiguous() * Shape::kCount); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { address_iterator_.set_iteration_index(0); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset); ++address_iterator_; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; *(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx]; ++address_iterator_; } } } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for k interleaved arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int InterleavedK, int Alignment> class RegularTileIterator< Shape_, Element_, layout::TensorOpMultiplicandColumnMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( 
AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandColumnMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< cutlass::MatrixShape<Shape::kColumn, Shape::kRow>, Element, layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>, (kAdvanceRank == 1 ? 0 : 1), ThreadMap >; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.strided(), coord.contiguous()}); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
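A pattern worth noting in the file above: only the pitch-linear specializations do real work, while the row-major and column-major specializations wrap an `UnderlyingIterator` and simply swap coordinates in calls such as `add_tile_offset({coord.column(), coord.row()})`. The sketch below models that adapter idea with plain structs of our own; it is not CUTLASS code.

```cpp
#include <cstdio>

struct PitchLinearCoord { int contiguous; int strided; };

// The single real implementation works purely in pitch-linear terms.
struct PitchLinearIterator {
  void add_tile_offset(PitchLinearCoord c) {
    std::printf("advance by (contiguous=%d, strided=%d) tiles\n", c.contiguous, c.strided);
  }
};

// Row-major adapter: columns are contiguous, rows are strided.
struct RowMajorIterator {
  PitchLinearIterator impl;
  void add_tile_offset(int row, int column) { impl.add_tile_offset({column, row}); }
};

// Column-major adapter: rows are contiguous, columns are strided.
struct ColumnMajorIterator {
  PitchLinearIterator impl;
  void add_tile_offset(int row, int column) { impl.add_tile_offset({row, column}); }
};

int main() {
  RowMajorIterator rm;
  ColumnMajorIterator cm;
  rm.add_tile_offset(/*row=*/1, /*column=*/2);  // forwards as (2, 1)
  cm.add_tile_offset(/*row=*/1, /*column=*/2);  // forwards as (1, 2)
  return 0;
}
```

Keeping the layout-specific logic to a coordinate swap is what lets one pitch-linear implementation serve every matrix layout in the file.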
# CuTe's support for Matrix Multiply-Accumulate instructions In this file, we explain in detail how we support our GPUs' Matrix Multiply-Accumulate (MMA) hardware instructions in CuTe. MMAs are architecture-specific. Different generations of GPU architectures introduce different sets of MMA instructions. However, CuTe features such as `Layout` makes it possible to expose MMAs for use in generic CUDA C++ code. We accomplish this in multiple steps. 1. We wrap each MMA's PTX instruction in an "Operation" struct. 2. For each Operation struct, we define a "Traits" struct that defines all of the meta-information needed to use the Operation. 3. Combining the above, an "Atom" is the combination of the PTX Operation struct with the meta-information Traits struct and provides methods to construct `cute::Tensor` "fragments" for that Operation and to use that Operation on existing `cute::Tensor`s. 4. Combining potentially multiple Atoms, a "TiledMMA" provides utilities for building more complex partitioning patterns by creating layouts and interleavings of Atoms. ## CuTe MMA Atoms CuTe exposes each MMA to generic CUDA C++ code as a pair of structs: an "Operation" struct, and an `MMA_Traits` struct templated on the Operation struct type. An "Operation" struct exposes the PTX instruction for that specific operation. It defines the arguments and interface it expects. Operation structs have minimal software dependencies -- they do not use layouts, tensors, or non-standard numeric data types -- and describe only the physical inputs and outputs to the instruction. Different structs have different names that describe what the MMA instruction does. We will explain the naming scheme below. A corresponding `MMA_Traits` struct specialization defines meta-information about the Operation, such as the logical compute types, the logical shape of the operation, and the `Layout`s of threads and values within the operation. The `MMA_Traits` struct takes the Operation as a template parameter. CuTe specializes `MMA_Traits` for each Operation type that it supports. Together, these two types comprise an "Atom" that decouples the complexity of thread and data layouts from the call site of the PTX instruction. The Atom's Traits struct exposes information that is relevant to a single MMA operation, no matter the granularity at which it operates. CuTe MMA atoms expose the semantics of a single MMA operation. This is true regardless of the hardware level at which the MMA operates. CuTe supports MMA atoms that operate at a variety of hardware levels, including * a single thread (e.g., fused multiply-add (FMA) instruction); * a quadpair (Volta); * a single warp (Ampere); and * a warpgroup (Hopper). ### Operation structs #### Location of files CuTe provides its Operations structs in the [`include/cute/arch`](../../../include/cute/arch) directory, in header files starting with `mma`. #### Operation struct's name A CuTe Operation struct's name principally encodes the PTX instruction it wraps. These often include * its first supported architecture, * the M, N, and K dimensions that it accepts, * the types that it takes, and * the arrangement of the A and B inputs. For example, the Volta section below will refer to the `SM70_8x8x4_F32F16F16F32_NT` Operation struct defined in [`include/cute/arch/mma_sm70.hpp`](../../../include/cute/arch/mma_sm70.hpp). * "SM70" refers to Volta. * "8x8x4" refers to M = 8, N = 8, and K = 4, the dimensions of the MMA operation that the quadpair performs (see below). 
This is reflected in the PTX as `.m8n8k4.`. * "F32F16F16F32" refers to the element types of the four matrix operands A, B, C, and D. An MMA computes D = C + A * B, so we read the types from left to right: D is F32 (`float`), A is F16 (half), B is F16 (half), and C is F32 (`float`). This is reflected in the PTX instruction name as `.f32.f16.f16.f32`. * "NT" means that the PTX instruction is designed for inputs A as M-major (not transposed, column-major) and inputs B as N-major (transposed, row-major). This is reflected in the PTX instruction name as `.col.row.`. #### Contents An Operation struct has the following members. ##### Type aliases An Operation struct has four public type aliases: `DRegisters`, `ARegisters`, `BRegisters`, and `CRegisters`. For example, the `SM70_8x8x4_F32F16F16F32_NT` Operation struct defined in [`include/cute/arch/mma_sm70.hpp`](../../../include/cute/arch/mma_sm70.hpp) defines these as follows. ```c++ using DRegisters = float[8]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[2]; using CRegisters = float[8]; ``` This shows how many values each thread will pass into the PTX instruction for each of the matrices A, B, C, and D. For this Operation, each thread passes 8 F32 values each for C and D (hence `float[8]`), and 4 F16 values each for A and B (hence `uint32_t[2]`; the instruction packs two 16-bit F16 values in each of the two 32-bit `uint32_t` values). ##### `fma` static member device function An operation struct defines a public `static void fma` function. It is marked with the `CUTE_HOST_DEVICE` macro, which adds the `__host__ __device__` annotations. Different Operations define `fma` to take different numbers of arguments, depending on the PTX MMA instruction. The implementation protects use of the PTX instruction with a macro, and raises an `assert` if `fma` is called when the macro is not defined. This ensures that tests and examples that use this Operation in an Atom can still compile, even if the PTX instruction is not available. ### Traits #### Location of files CuTe provides its Traits structs in the [`include/cute/atom`](../../../include/cute/atom) directory, in header files starting with `mma_traits`. #### Contents An `MMA_Traits` specialization defines the following public type aliases. * `ValTypeD`: Logical compute type of the D matrix * `ValTypeA`: Logical compute type of the A matrix * `ValTypeB`: Logical compute type of the B matrix * `ValTypeC`: Logical compute type of the C matrix * `Shape_MNK`: Logical MxNxK shape of the MMA operation * `ThrID`: Logical thread mapping within the single MMA operation (specifying the thread, quadpair, warp, or warpgroup view) * `ALayout`: Mapping of (thread,value) pairs to coordinates in the MxK A matrix * `BLayout`: Mapping of (thread,value) pairs to coordinates in the NxK B matrix * `CLayout`: Mapping of (thread,value) pairs to coordinates in the MxN C matrix #### Example The specialization of MMA_Traits for the `SM70_8x8x4_F32F16F16F32_NT` Operation lives in the header file [`include/cute/atom/mma_traits_sm70.hpp`](../../../include/cute/atom/mma_traits_sm70.hpp). It looks like this. ```c++ template <> struct MMA_Traits<SM70_8x8x4_F32F16F16F32_NT> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Col; using BLayout = SM70_8x4_Col; using CLayout = SM70_8x8_32b; }; ``` The next section will explain these type aliases in detail. 
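Before moving on, it helps to see the two halves of the atom side by side. The Operation struct that the Traits specialization above describes has roughly the following shape. This is an abbreviated, schematic sketch only -- the argument list is elided and the architecture-guard macro name may differ -- see [`include/cute/arch/mma_sm70.hpp`](../../../include/cute/arch/mma_sm70.hpp) for the real definition.

```c++
// Schematic sketch; see include/cute/arch/mma_sm70.hpp for the real definition.
struct SM70_8x8x4_F32F16F16F32_NT {
  using DRegisters = float[8];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[2];
  using CRegisters = float[8];

  CUTE_HOST_DEVICE static void
  fma(float         & d0, /* ... through d7 ... */
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0, uint32_t const& b1,
      float    const& c0  /* ... through c7 ... */)
  {
#if defined(CUTE_ARCH_MMA_SM70_SUPPORTED)   // exact guard macro name is illustrative
    // Inline PTX: mma.sync.aligned.m8n8k4.col.row.f32.f16.f16.f32
    // computes (d0..d7) = (a0,a1) * (b0,b1) + (c0..c7) for this quadpair lane.
#else
    // Compiled without support for this instruction: raise an assert at run time.
#endif
  }
};
```

The Traits struct carries all of the layout and type meta-information, while the Operation struct stays a thin wrapper around the instruction itself.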
## Volta This and the following sections show examples of how to construct MMA atoms. We don't try to explain this for all GPU architectures and MMAs. Instead, we use selected examples to illustrate the process of developing new atoms. Volta architecture implements an HMMA instruction where a group of 8 threads called a quadpair (QP) collaborate to share data and perform an 8x8x4 (fp32 or fp16) matrix multiply-accumulate. (since a warp is 32 threads wide, it would perform an MMA across 4 QPs for a tile size of 16x16x4). We first take a look at how we would take the ISA semantics of thread and data partitioning for the HMMA instruction, and encode it in a Traits struct. The HMMA NT instruction has the thread-data layout: <p align="center"> <img src="../../images/cute/HMMA.8x8x4.NT.png" alt="HMMA.8x8x4.NT.png" height="400"/> </p> ### Types The HMMA NT above uses types: ```cpp using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; ``` The rest of the `MMA_Traits` will be described in units of these types. ### Shape The HMMA NT above has shape 8x8x4: ```cpp // Logical shape of the MMA using Shape_MNK = Shape <_8,_8,_4>; ``` ### Thread ID If the 32 threads in a warp are logically indexed by [0 ... 31], then the above image contains threads [0,1,2,3]U[16,17,18,19]. These threads make up the 0th quadpair. We can write a thread mapping that maps eight logical thread ids [0,1,2,3,4,5,6,7] of the MMA to a quadpair thread index [0,1,2,3]U[16,17,18,19] of a warp. The layout function has 4 elements with a stride of 1 and 2 of those with a stride of 16. With this, we write a layout that represents a quadpair: ```cpp // Mapping from (logical thread id) -> (thread idx) using ThrID = Layout<Shape <_4, _2>, Stride<_1,_16>>; ``` Again, this layout function maps the logical thread id [0,8) of the MMA operation onto the quadpair thread index [0,4)U[16,20) of a warp. ### Accumulator Mapping Let us look at exactly how the 8 threads within a QP are mapped to the A, B and C matrices. For the C and D matrices, the above image is broken down a bit more below. On the left is shown the whole QP level view, and on the right is shown the values owned by just thread 0. <p align="center"> <img src="../../images/cute/HMMA.8x8x4.quadpair.C.png" alt="HMMA.8x8x4.quadpair.C.png" height="400"/> </p> The metainformation of this single instruction level view is what we want to encode in CuTe. Specifically, the QP level view in this diagram corresponds to the four MMA traits for [SM70_F32F16F16F32](../../../include/cute/arch/mma_sm70.hpp). These structs contain the `Element` types, the `Shape_MNK`, and the `ThrID` mapping we constructed above. Now, let us take a look at the definition of `CLayout`, the thread-data layout of accumulators. The job of `CLayout` is to construct a mapping between the `(logical_thr_id, logical_val_id)` and `(m, n)` coordinate in the C matrix which can then be used to build up more complicated layouts and operations like the 16x16x4 WMMA. We can start constructing a `CLayout` from the picture above. As with any CuTe layout, it is a pair of `Shape` and corresponding `Stride`. Let us just look at the shape for now. We know that the HMMA uses 8 threads each of which own 8 values. Therefore, the shape of our mapping must have a size of 8 along two modes. With this, we have ```cpp // (T8,V8) -> (m,n) using CLayout = Layout<Shape <_8, _8>, Stride<_?, _?>; // Stride to be filled in below ``` This is not to be confused with the logical 8x8 shape of the C matrix. 
This is 8-threads by 8-values. We now want to map those to (m,n) coordinates. Since CuTe layouts return indices rather than coordinates, we choose a column-major encoding of the (m,n) coordinates: ``` (logical_thr_id, logical_val_id) -> (m, n) == m + n * M ``` With this in place, we can start thinking about how to construct the strides in `CLayout`. Let's begin by looking at the strides between threads. Note that * `(T0,V0)` is located at `(m,n) = (0,0) = 0` * `(T1,V0)` is located at `(m,n) = (1,0) = 1` * `(T2,V0)` is located at `(m,n) = (0,2) = 16` * `(T3,V0)` is located at `(m,n) = (1,2) = 17` * `(T4,V0)` is located at `(m,n) = (4,0) = 4` * `(T5,V0)` is located at `(m,n) = (5,0) = 5` * `(T6,V0)` is located at `(m,n) = (4,2) = 20` * `(T7,V0)` is located at `(m,n) = (5,2) = 21` where `T4`,`T5`,`T6`,`T7` are the 4th,5th,6th,7th logical thread id of the MMA corresponding to thread indices of 16,17,18,19 of the warp (recorded in the `ThrID` mapping!). We note that the pattern can be transcribed to a layout. We can find the position of the 8 threads via ```cpp using CLayout = Layout<Shape <Shape <_2, _2, _2>, _8>, Stride<Stride<_1, _16, _4>, _?>; ``` With the exact same approach, we can construct the stride along the `logical value id` mode. * `(T0,V0)` is located at `(m,n) = (0,0) = 0` * `(T0,V1)` is located at `(m,n) = (0,1) = 8` * `(T0,V2)` is located at `(m,n) = (2,0) = 2` * `(T0,V3)` is located at `(m,n) = (2,1) = 10` * `(T0,V4)` is located at `(m,n) = (0,4) = 32` * `(T0,V5)` is located at `(m,n) = (0,5) = 40` * `(T0,V6)` is located at `(m,n) = (2,4) = 34` * `(T0,V7)` is located at `(m,n) = (2,5) = 42` We note that this pattern can also be transcribed to a layout. We can find the position of the 8 values via ```cpp // (T8,V8) -> (m,n) using CLayout = Layout<Shape <Shape <_2, _2,_2>, Shape <_2,_2, _2>>, Stride<Stride<_1,_16,_4>, Stride<_8,_2,_32>>>; ``` And that's all! We can verify that each `(tid,vid)` coordinate in this layout is reliably mapped to the correct (encoded) `(m,n)` coordinate. In the case of F16 accumulators, the layout is way less complex. Each row of accumulators `(m, :)` is held by a single thread, which makes the layout: ```cpp using CLayout = Layout<Shape <_8,_8>, Stride<_1,_8>>; ``` ### A and B Layout Mapping A and B matrix layouts depend on whether the sources are transposed or not. The diagram below shows the thread ID to data ownership map for A and B matrices in the case of NT and TN transposes. <p align="center"> <img src="../../images/cute/HMMA.8x8x4.quadpair.AB.png" alt="HMMA.8x8x4.quadpair.AB.png" height="400"/> </p> Let's look at the TN layout for A matrix first (right side in the diagram). Again, there are the same 8 logical threads, but each threads owns only 4 elements this time. The shape of `ALayout` will then be `Shape<_8, _4>`. As for the strides, we again need a similar mapping between `(m, k) == m + k * M`. Looking down the `M` mode, we go from `(T0, V0)` to `(T1, V0)` which is a stride of 1 for all 8 threads. For the `K` mode, as we go across, we go from `(T0, V0)` to `(T0, V1)`, which makes a stride of 8 for all 4 values. Therefore, the A layout is: ```cpp // (T8,V4) -> (m,k) using ALayout = Layout<Shape <_8,_4>, Stride<_1,_8>>; ``` Source B layout is constructed similarly for the TN HMMA, except that we want write it as `(N,K)` rather than `(K,N)` for convenience. For the strides, as we go across the `N` mode, we go from `(T0, V0)` to `(T1, V0)`, making this a stride of 1 for all 8 threads. 
As we go down the `K` mode, `(T0, V0)` to `(T0, V1)` which is a stride of 8 for all 4 values. So the B layout is the same as A: ```cpp // (T8,V4) -> (n,k) using BLayout = Layout<Shape <_8,_4>, Stride<_1,_8>>; ``` The layouts in the case of NT are a bit more complicated (left side of the diagram). Going down the `M` mode of `A`, we see the four values of `T0` first and then we see the four values of `T4`. This means we first have a stride of 1 for 4 values, followed by a stride of 4 from `T0` to `T4`. So we have two sub-strides along the `M` mode. For the `K` mode, as we go across, we simply increment the `thr_id`, keeping `val_id` the same, making the stride 8 for 4 threads. This makes the A layout: ```cpp // (T8,V4) -> (m,k) using ALayout = Layout<Shape <Shape <_4,_2>,_4>, Stride<Stride<_8,_4>,_1>>; ``` With the `(N,K)` ordering for B, the layout is the same. ```cpp // (T8,V4) -> (n,k) using BLayout = Layout<Shape <Shape <_4,_2>,_4>, Stride<Stride<_8,_4>,_1>>; ``` For the NN and TT transposes, they are simply combinations of the two layouts we have seen for A and B so far. ## Hopper Now, we are ready to take a look at the much larger GMMA operation (Group MMA) first introduced with Hopper architecture. These MMA instructions operate at the granularity of 128 threads (4 warps), which are collectively referred to as a warpgroup. ### Thread ID In the case of Hopper GMMAs, the thread IDs are assigned based on the simple 1D contiguous layout, which makes `thrID` trivial: ```cpp using ThrID = Layout<_128, _1>; ``` ### Accumulator Mapping Accumulators are mapped hierarchically in GMMA, starting from the concept of a core matrix and building up to a layout for the whole C matrix tile. Let's look at this core matrix first. We only consider fp16 accumulators here, but extensions of fp32 accumulators as trivial as we will see later. Each core matrix has the layout as shown in the diagram below. <p align="center"> <img src="../../images/cute/gmma_coremat_cd_fp16.png" alt="gmma_coremat_cd_fp16.png" height="600"/> </p> As in the Volta examples, the thread IDs are logical only, and which of the four warps they belong to in the warpgroup is not important. Then GMMA tiles this core matrix first vertically along the M mode, and then repeats that column of core matrices along the N mode to construct the full MxN tile. This tiling is shown in the image below. <p align="center"> <img src="../../images/cute/gmma_wg_n_slice.png" alt="gmma_wg_n_slice.png" height="600"/> </p> With this image, we are again ready to start building the `CLayout` for `SM90_64x128x16_F16F16F16F16_TN` atom. Same as before, we are constructing a mapping between the `(logical_thr_id, logical_val_id) -> (m, n)` coordinate spaces. To begin, let's follow the first few threads and values. We immediately see that they are arranged along the `N`-mode with pairs of values and four threads. This gives us ```cpp // (T128,V4) -> (M64,N8) using CLayout = Layout<Shape <Shape < _4, ...>, Shape < _2, ...>>, Stride<Stride<_128, ...>, Stride<_64, ...>>>; ``` To complete the first 8x8 core matrix, the four threads repeat eight times down the `M`-mode: ```cpp // (T128,V4) -> (M64,N8) using CLayout = Layout<Shape <Shape < _4, _8, ...>, Shape < _2, ...>>, Stride<Stride<_128, _1, ...>, Stride<_64, ...>>>; ``` Then, as we go to the next core matrix, we wrap back again to `T0`, but this time to `(T0, V2)`. 
```cpp // (T128,V4) -> (M64,N8) using CLayout = Layout<Shape <Shape < _4, _8, ...>, Shape < _2, _2>>, Stride<Stride<_128, _1, ...>, Stride<_64, _8>>>; ``` Finally, we get this entire pattern repeating four times, once for each warp, down the `M`-mode starting at `(m,n) = (16,0) = 16`. where two core matrices that belong to the same warp are stacked on top of each other. This makes the size of the final sub-mode of M 4. As for the stride, this time we go to `(T32, V0)`, which makes it a stride of 32. ```cpp // (T128,V4) -> (M64,N8) using CLayout = Layout<Shape <Shape < _4, _8, _4>, Shape < _2, _2>>, Stride<Stride<_128, _1, _16>, Stride<_64, _8>>>; ``` This is the full `CLayout` for 64x8 accumulators. The GMMA instructions include 64xN variants with `N = [16,32,64,128,256]` where this 64x8 pattern is repeated giving each thread additional values. As this starts at `(m,n) = (0,8) = 512`, this is easy to account for in our `CLayout`. For example, the 64x128 `CLayout` is ```cpp // (T128,V64) -> (M64,N128) using CLayout = Layout<Shape <Shape < _4, _8, _4>, Shape < _2, _2, _16>>, Stride<Stride<_128, _1, _16>, Stride<_64, _8, _512>>>; ``` where we see 16 copies of the 64x8 tile. ### A and B Layout Mapping GMMA atoms that consume A and B sources directly from shared memory are a bit interesting. The GMMA Descriptor is constructed on an entire tile of A and/or B data in shared memory rather than being partitioned by threads. That is, every thread sees the entire tile of data and the tile is not reordered so that the descriptor can be constructed on it. In `ALayout` form, this can be expressed ```cpp // (T128,V64x8) -> (M64,K16) using ALayout = Layout<Shape <_128, Shape <_64,_16>>, Stride< _0, Stride< _1,_64>>>; ``` That is, all threads are mapped the to `(m,k) = (0,0) = 0` element and the values (and shape of the values) remains unchanged. The GMMA Descriptor Constructor can then inspect the `(M,K)` layout of this data and create an appropriate GMMA Descriptor or produce an error message saying the data is in an invalid layout for GMMA. ## `TiledMMA`s We can make more complex patterns by combining and interleaving multiple atoms. Let's start with `SM70_8x8x4_F32F16F16F32_NT`. ```cpp MMA_Atom mma = MMA_Atom<SM70_8x8x4_F32F16F16F32_NT>{}; print_latex(mma); ``` <p align="center"> <img src="../../images/cute/HMMA.8x8x4.NT_Atom.png" alt="HMMA.8x8x4.NT_Atom.png" height="400"/> </p> The above is equivalent to ```cpp TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{}, Layout<Shape<_1,_1,_1>>{}, // Layout of Atoms Tile<_8,_8,_4>{}); // Tiler print_latex(mma); ``` as it is a single atom and has a natural tile size of 8x8x4. We can create an object akin to a WMMA by using four of these quadpair MMAs: ```cpp TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{}, Layout<Shape <_2,_2>, Stride<_2,_1>>{}); // 2x2 n-major layout of Atoms print_latex(mma); ``` <p align="center"> <img src="../../images/cute/HMMA.8x8x4.NT_2x2.png" alt="HMMA.8x8x4.NT_2x2.png" height="400"/> </p> This `TiledMMA` replicates the `MMA_Atom` across threads as we can see the `T4` and `T8` and `T12` threads in the `C`-matrix that were not used before. Each quadrant of the `C`-matrix is a replica of the atom's partitioning pattern for a new quadpair and this replication follows a `(2,2):(2,1)` layout. 
The above represents a 16x16x4 MMA now, but we can immediately expand this "tile size" up to 32x32x4 instead: ```cpp TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{}, Layout<Shape <_2,_2>, Stride<_2,_1>>{}, // 2x2 n-major layout of Atoms Tile<_32,_32,_4>{}); // 32x32x4 tiler print_latex(mma); ``` <p align="center"> <img src="../../images/cute/HMMA.8x8x4.NT_2x2_32x32x4.png" alt="HMMA.8x8x4.NT_2x2_32x32x4.png" height="400"/> </p> This `TiledMMA` replicates the previous `TiledMMA` across values instead of threads. We can see the `T0V8` and `T16V8` and `T8V8` values in the `C`-matrix that were not used before. Each quadrant of the `C`-matrix is a replica of the previous `TiledMMA`'s partitioning pattern for a new set of values. Continuing, we see that there are eight values that `T0` receives from the `A`-matrix. Those reads occur at coordinates ``` T0V0 => ( 0,0) T0V1 => ( 1,0) T0V2 => ( 2,0) T0V3 => ( 3,0) T0V4 => (16,0) T0V5 => (17,0) T0V6 => (18,0) T0V7 => (19,0) ``` which are separate, but we might prefer them to be next to each other. That is we would like to permute the `M`-mode to create another valid `TiledMMA`. ```cpp TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{}, Layout<Shape <_2,_2>, Stride<_2,_1>>{}, // 2x2 n-major layout of Atoms Tile<Layout<Shape <_4,_4,_2>, Stride<_1,_8,_4>>, // Permutation on M, size 32 _32, // Permutation on N, size 32 identity _4>{}); // Permutation on K, size 4 identity print_latex(mma); ``` <p align="center"> <img src="../../images/cute/HMMA.8x8x4.NT_2x2_32Mx32x4.png" alt="HMMA.8x8x4.NT_2x2_32Mx32x4.png" height="400"/> </p> That layout `(4,4,2):(1,8,4)` is read like a scatter permutation, telling the m-coords of the original image where to go in the new image. ``` old m-coord: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 new m-coord: 0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27 4 5 6 7 12 13 14 15 20 21 22 23 28 29 30 31 ``` This permutes only the M-mode (in `A` and `C` accordingly) and brings the access of all threads to be contiguous in m-coordinates in the `A`-matrix. This is convenient when designing layouts for shared memory or registers, for example. The MMA instructions contained within the image above are now effectively interleaved in the logical m-coordinates. Of course, permutations in the N-mode and K-mode are also valid. To see how these `TiledMMA`s are used to partition data tensors, see the [`0x_gemm_tutorial.md`](./0x_gemm_tutorial.md).
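As a short preview of that tutorial, the typical partitioning pattern looks like the sketch below. Here `sA`, `sB`, and `gC` are assumed to be existing `cute::Tensor`s covering this `TiledMMA`'s tile, and the method names are paraphrased from the tutorial rather than forming a complete, compilable listing.

```cpp
TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{},
                              Layout<Shape <_2,_2>,
                                     Stride<_2,_1>>{});   // 2x2 n-major layout of Atoms

auto thr_mma = mma.get_slice(threadIdx.x);    // this thread's slice of the TiledMMA

Tensor tCsA = thr_mma.partition_A(sA);        // (MMA,MMA_M,MMA_K) portion of A visited by this thread
Tensor tCsB = thr_mma.partition_B(sB);        // (MMA,MMA_N,MMA_K) portion of B visited by this thread
Tensor tCgC = thr_mma.partition_C(gC);        // (MMA,MMA_M,MMA_N) portion of C owned by this thread

Tensor tCrC = thr_mma.make_fragment_C(tCgC);  // register-backed accumulators of matching shape
clear(tCrC);

gemm(mma, tCsA, tCsB, tCrC);                  // tCrC += tCsA * tCsB using the tiled atoms
copy(tCrC, tCgC);                             // write the accumulators back out
```

The leading `MMA` mode records the values a single atom consumes per thread, and the remaining modes count how many atom tiles this thread visits in each direction.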
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Profiler") [README](../../README.md#documentation) > **CUTLASS Profiler** # CUTLASS Profiler The CUTLASS Profiler is a command-line driven test and profiling environment for CUTLASS computations defined in the CUTLASS Instance Library. The CUTLASS Profiler is capable of executing each GEMM, Sparse Gemm, Conv2d, and Conv3d kernel. The CUTLASS Profiler may be compiled with: ```bash $ make cutlass_profiler -j ``` To limit compilation time, only one tile size (typically 128x128) and threadblock cluster size (typically 2x1x1) is instantiated for each data type, math instruction, and layout. To instantiate all sizes, set the following environment variable when running CMake from an empty `build/` directory. ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=all -DCUTLASS_UNITY_BUILD_ENABLED=ON ... $ make cutlass_profiler -j ``` Enabling the unity build places multiple kernel instances in one compilation unit, thereby reducing size of the compiled binary and avoiding linker limitations on some platforms. The CUTLASS Profiler sources are stored in ```bash tools/ profiler/ ``` The CUTLASS Profiler usage statement may be obtained by executing `cutlass_profiler --help` and appears as follows. ```bash CUTLASS Performance Tool usage: cutlass_profiler [options] --help --mode=<string> Cutlass profiler execution mode. --mode=profile regular verification and profiling (default) --mode=dry_run no kernels are launched or workspaces allocated --mode=enumerate lists all operation kind and operations --mode=trace executes a single device-side computation with no other kernel launches --device-info Prints information on all GPUs present in the system --operation=<operation_kind> CUTLASS operation to profile. --kernels=<string_list> Filter operations by kernel names. For example, call all kernels with ("s1688" and "nt") or ("s844" and "tn" and "align8") in their operation name using --kernels="s1688*nt, s884*tn*align8" --ignore-kernels=<string_list> Excludes kernels whose names match anything in this list. Device: --device=<int> CUDA Device ID --compute-capability=<int> Override the compute capability. --llc-capacity=<capacity in KiB> Capacity of last-level cache in kilobytes. If this is non-zero, profiling phases cycle through different input tensors to induce capacity misses in the L2. Initialization: --initialization=<bool> Enables initialization (default: true). If false, device memory is not initialized after allocation. --initialization-provider=<provider> Selects initialization provider {host, device*}. (default: '*') --dist=<distribution> Data distribution of input tensors {uniform*, gaussian, identity, sequential} --dist=uniform,min:<double>,max:<double>,scale:<integer> --dist=gaussian,mean:<double>,stddev:<double>,scale:<integer> --dist=sequential,start:<double>,delta:<double>,scale:<integer> --dist=identity --seed=<int> Random number generator seed. Used to enforce deterministic initialization. Library: --library-algo-mode=<mode> Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN. mode={default*,matching,best} --library-algos=<range-list> If --algorithm-mode=best, permits specifying a selection of algorithms. Profiling: --workspace-count=<workspace count> Number of discrete workspaces maintained to avoid cache-resident If zero (default), the amount is chosen for each workload based on capacity of the last-level cache. 
--profiling-iterations=<iterations> Number of iterations to profile each kernel. If zero, kernels are launched up to the profiling duration. --warmup-iterations=<iterations> Number of iterations to execute each kernel prior to profiling. --sleep-duration=<duration> Number of ms to sleep between profiling periods (ms). --profiling-enabled=<bool> If true, profiling is actually conducted. Verification: --verification-enabled=<bool> Whether to perform verification checks. --epsilon=<error> Error threshold. Setting to zero (default) requires bit-level equivalence. --nonzero-floor=<floor> Results whose absolute value is less than this quantity are treated as zero for comparisons. --save-workspace=<string> Specifies when to save the GEMM inputs and results to the filesystem. --save-workspace=never never save workspace (default) --save-workspace=incorrect save workspace for incorrect results --save-workspace=always always save workspace --verification-providers=<providers> List of providers used to verify result. (default: '*') Gemm verification-providers {cublas*} Conv2d verification-providers {cudnn*, device*, host} Report: --append=<bool> If true, result is appended to possibly existing file. Otherwise, any existing file is overwritten. --output=<path> Path to output file for machine readable results. Operation kind and '.csv' is appended. --junit-output=<path> Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended. --report-not-run=<bool> If true, reports the status of all kernels including those that do not satisfy the given arguments. --tags=<column:tag,...> Inserts leading columns in output table and uniform values for each column. Useful for generating pivot tables. --verbose=<bool> Prints human-readable text to stdout. If false, nothing is written to stdout. About: --version CUTLASS 2.4.0 built on Nov 19 2020 at 11:59:00 Operations: gemm General matrix-matrix product. D = alpha * A*B + beta * C spgemm Structured sparse GEMM. D = alpha * A*B + beta * C conv2d Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D) conv3d Conv3d operation. Output(Tensor5D) = alpha * Input(Tensor5D) * Filter(Tensor5D) + beta * Input(Tensor5D) For details about a particular function, specify the function name with --help. Example: $ cutlass_profiler --operation=Gemm --help $ cutlass_profiler --operation=Conv3d --help $ cutlass_profiler --operation=Conv2d --help ``` # GEMM The CUTLASS Profiler is capable of executing GEMM and Sparse GEMM problems. The CUTLASS Profiler can be built with cuBLAS enabled to use as a reference implementation. If CMake detects the cuBLAS library available in the system, it is included as a dependency. This may be explicitly overridden with CMake flag `CUTLASS_ENABLE_CUBLAS`. ## GEMM Arguments The complete set of arguments available to each operation may be viewed by specifying the operation name in addition to `--help`. The argument flags and their aliases usable for GEMM appear as follows. ```bash $ ./tools/profiler/cutlass_profiler --operation=gemm --help GEMM [enum] --gemm_kind Variant of GEMM (e.g. 
universal, gemm, planar_complex, planar_complex_array) [int] --m,--problem-size::m M dimension of the GEMM problem space [int] --n,--problem-size::n N dimension of the GEMM problem space [int] --k,--problem-size::k K dimension of the GEMM problem space [tensor] --A Tensor storing the A operand [tensor] --B Tensor storing the B operand [tensor] --C Tensor storing the C operand [scalar] --alpha,--epilogue::alpha Epilogue scalar alpha [scalar] --beta,--epilogue::beta Epilogue scalar beta [enum] --split_k_mode,--split-k-mode Variant of split K mode(serial, parallel) [int] --split_k_slices,--split-k-slices Number of partitions of K dimension [int] --batch_count,--batch-count Number of GEMMs computed in one batch [enum] --op_class,--opcode-class Class of math instruction (simt, tensorop, wmmatensorop, wmma). [enum] --accum,--accumulator-type Math instruction accumulator data type [int] --cta_m,--threadblock-shape::m Threadblock shape in the M dimension [int] --cta_n,--threadblock-shape::n Threadblock shape in the N dimension [int] --cta_k,--threadblock-shape::k Threadblock shape in the K dimension [int] --cluster_m,--cluster-shape::m Cluster shape in the M dimension [int] --cluster_n,--cluster-shape::n Cluster shape in the N dimension [int] --cluster_k,--cluster-shape::k Cluster shape in the K dimension [int] --stages,--threadblock-stages Number of stages of threadblock-scoped matrix multiply [int] --warps_m,--warp-count::m Number of warps within threadblock along the M dimension [int] --warps_n,--warp-count::n Number of warps within threadblock along the N dimension [int] --warps_k,--warp-count::k Number of warps within threadblock along the K dimension [int] --inst_m,--instruction-shape::m Math instruction shape in the M dimension [int] --inst_n,--instruction-shape::n Math instruction shape in the N dimension [int] --inst_k,--instruction-shape::k Math instruction shape in the K dimension [int] --min_cc,--minimum-compute-capability Minimum device compute capability [int] --max_cc,--maximum-compute-capability Maximum device compute capability Examples: Profile a particular problem size: $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128 Schmoo over problem size and beta: $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5 Schmoo over accumulator types: $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32 Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t): $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row Using various input value distribution: $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3 $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3 $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1 Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size): $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv: $ cutlass_profiler --operation=Gemm \ --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \ --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \ --k=8,16,32,64,128,256,288,384,504,512,520 \ --beta=0,1,2 --profiling-iterations=1 \ --providers=cutlass --output=functional-test.csv ``` The format of tensor argument is followed by `<type>:<layout>`. The type could be `f32` as 32-bit floating point, `s8` as 8-bit signed integer, etc. The available types can be referred to the `NumericTypeID_enumerants` in [util.cu](tools/library/src/util.cu). The layout could be `row` or `column`. ## Example CUDA Core GEMM Operation Example command line for profiling SGEMM kernels is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=sgemm --m=3456 --n=4096 --k=4096 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: gemm Operation: cutlass_simt_sgemm_128x128_8x2_nn_align1 Status: Success Verification: ON Disposition: Passed cuBLAS: Passed Arguments: --m=3456 --n=4096 --k=4096 --A=f32:column --B=f32:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \ --batch_count=1 --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 Bytes: 180355072 bytes FLOPs: 115992428544 flops Runtime: 6.73655 ms Memory: 24.934 GiB/s Math: 17218.4 GFLOP/s ``` Note, the arguments which appear in the output may be used as command line parameters for subsequent invocations. ## Example Tensor Core GEMM Operations To execute kernels targeting Tensor Core operations, supply the flag `--op_class=tensorop` in the command line. ```bash $ ./tools/profiler/cutlass_profiler --op_class=tensorop --m=3456 --n=4096 --k=8192 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: gemm Operation: cutlass_tensorop_s16816gemm_f16_256x128_32x3_nn_align8 Status: Success Verification: ON Disposition: Passed cuBLAS: Passed Arguments: --m=3456 --n=4096 --k=8192 --A=f16:column --B=f16:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \ --batch_count=1 --op_class=tensorop --accum=f32 --cta_m=256 --cta_n=128 --cta_k=32 --stages=3 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024 Bytes: 180355072 bytes FLOPs: 231956545536 flops Runtime: 0.98647 ms Memory: 170.272 GiB/s Math: 235138 GFLOP/s ``` ## Covering the problem space All arguments may have single values or comma-delimited set of values. Integers may also be specified as an inclusive range with the following syntax `start:end:increment` or simply `start:end`. For example, the following sweeps over the range of the GEMM K dimension from 8 to 4096 in increments of 8 elements. 
```bash $ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn --m=4352 --n=4096 --k=8:4096:8 ``` ## Output By default, runtime and computed GFLOP/s are reported for each operation and problem size. Additionally, a table of comma separated values are reported at the end of the execution. This may be output to a file with the `--output=<filename.csv>` command line option as shown: ```bash $ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn \ --m=3456 --n=4096 --k=8:4096:8 --output=report.csv ``` To faclitate generation of pivot tables and charts, additional columns may be prepended with the `--tags=<column>:<value>` option. One or more tags may be specified using a comma-delimited list. ```bash $ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn \ --m=3456 --n=4096 --k=8:4096:8 --output=report.csv \ --tags=cutlass:2.2,date:2020-06-08 ``` ## CUTLASS 3.0 GEMM procedural names CUTLASS 3.0 introduces a new naming convention for GEMMs used by the profiler targeting the NVIDIA Hopper architecture and beyond so as to indicate new features of the kernel within the name (e.g., the cluster shape). To best illustrate this naming convention, we will walk through the meaning of each of the components in a GEMM kernel used by the profiler: ``` cutlass3x_sm90_tensorop_s64x128x16gemm_f16_f16_f32_f16_f32_128x128x64_2x1x1_0_ntn_align8 ``` The components within this name are as follows: * `cutlass3x`: indicates that the kernel was generated through the CUTLASS 3.0 API * `sm90`: indicates that the kernel targets NVIDIA GPUs with compute capability 90 * `tensorop`: indicates that the kernel makes use of NVIDIA Tensor Cores (as opposed to `simt`, which indicates the use of "CUDA cores") * `s`: indicates that the Tensor Core instruction being used accumulates in single precision (as opposed to `h`, which indicates half precision) * `64x128x16gemm`: indicates that the shape of the Tensor Core instruction being used (MxNxK) is 64x128x16 * `f16_f16_f32_f16_f16`: indicates that the data types for operands A, B, Accumulator, C and D (in that order). * `128x128x64`: indicates that the thread block shape used in the GEMM (MxNxK) is 128x128x64 * `2x1x1`: indicates that the cluster shape being used is 2x1x1 * `0`: indicates that the kernel uses the CollectiveBuilder's automatic stage calculation to determine the number of pipeline stages in the kernel. Note that `0` does not mean that no stages are used. A nonzero value indicates that automatic stage calculation is not performed and indicates the number of pipeline stages to be used. This 0 is only added to the kernel's procedural name, the profiler will still report the actual stage count when printing the kernel argument details (`--stages=N`) and kernel discovery will still support filtering through the `--stages` argument. * `ntn`: indicates that the layouts for operands A, B, and C are column major ("n"; non-transposed), row major ("t"; transposed), and column major, respectively. * `align8`: indicates that the maximum alignment between operands A and B is 8. Note that in some special cases where the input A/B types do not match that of the MMA instruction's, the MMA facing input type is added to the instruction string as well. 
``` cutlass3x_sm90_tensorop_s64x128x8tf32gemm_f32_f32_f32_f32_f32_128x128x32_2x1x1_0_tnn_align4 ``` * `s64x128x8tf32gemm`: indicates that the MMA consumes inputs in `tf32` format, and therefore the kernel performs rounding of the `f32` values in global memory while loading them into shared memory. For custom mainloop or epilogue schedules, details of the opted-in schedule are appended to the end of the kernel name. For example, ``` cutlass3x_sm90_tensorop_h64x128x16gemm_f16_f16_f16_void_f16_128x128x64_1x1x1_0_nnn_align8_warpspecialized_cooperative_epi_tma ``` * `warpspecialized_cooperative`: Mainloop employs a persistent warp-specialized mainloop and kernel schedule. * `epi_tma`: Kernel epilogue employs TMA based vectorization. * `f16_f16_f16_void_f16`: In this case, C type is set to `void`, indicating that residual matrix support is disabled. # Convolution The CUTLASS Profiler is capable of executing 2-D and 3-D convolution problems for forwards and backwards operator variants. The CUTLASS Profiler can be built with cuDNN enabled to use as a reference implementation. If CMake detects the cuDNN library available in the system, it is included as a dependency. This may be explicitly overridden with CMake flag `CUTLASS_ENABLE_CUDNN`. ```bash $ cmake .. -DCUTLASS_LIBRARY_OPERATIONS=conv2d -DCUTLASS_ENABLE_CUDNN=OFF ... $ make -j16 cutlass_profiler ``` ## Convolution Arguments ```bash $ ./tools/profiler/cutlass_profiler --help --operation=Conv2d Conv2d [enum] --conv_kind Convolutional operator (fprop, dgrad, wgrad) [int] --n,--input_n Input N dimension of the Conv2d problem space [int] --h,--input_h Input H dimension of the Conv2d problem space [int] --w,--input_w Input W dimension of the Conv2d problem space [int] --c,--input_c Input C dimension of the Conv2d problem space [int] --k,--filter_k Filter K dimension of the Conv2d problem space [int] --r,--filter_r Filter R dimension of the Conv2d problem space [int] --s,--filter_s Filter S dimension of the Conv2d problem space [int] --p,--output_p Output P dimension of the Conv2d problem space [int] --q,--output_q Output Q dimension of the Conv2d problem space [int] --g,--groups Number of convolution groups [int] --pad_h Padding in H direction [int] --pad_w Padding in W direction [int] --stride_h Stride in H direction [int] --stride_w Stride in W direction [int] --dilation_h Dilation in H direction [int] --dilation_w Dilation in W direction [tensor] --Activation Tensor storing the Activation operand [tensor] --Filter Tensor storing the Filter operand [tensor] --Output Tensor storing the Output operand [enum] --conv_mode Convolution filter mode (conv, cross) [enum] --iterator_algorithm,--iterator_algo Convolution iterator algorithm (analytic, optimized) [scalar] --alpha,--epilogue::alpha Epilogue scalar alpha [scalar] --beta,--epilogue::beta Epilogue scalar beta [enum] --split_k_mode,--split-k-mode SplitK mode for serial or parallel reduction (serial, parallel) [int] --split_k_slices,--split-k-slices Number of partitions of K dimension [enum] --eq_gemm_provider,--eq-gemm-provider Enable profiling equivalent gemm by the following providers (cutlass) [enum] --op_class,--opcode-class Class of math instruction (simt, tensorop, wmmatensorop, wmma) [enum] --accum,--accumulator-type Math instruction accumulator data type [int] --cta_m,--threadblock-shape::m Threadblock shape in the M dimension [int] --cta_n,--threadblock-shape::n Threadblock shape in the N dimension [int] --cta_k,--threadblock-shape::k Threadblock shape in the K dimension [int] 
--cluster_m,--cluster-shape::m Cluster shape in the M dimension [int] --cluster_n,--cluster-shape::n Cluster shape in the N dimension [int] --cluster_k,--cluster-shape::k Cluster shape in the K dimension [int] --stages,--threadblock-stages Number of stages of threadblock-scoped matrix multiply [int] --warps_m,--warp-count::m Number of warps within threadblock along the M dimension [int] --warps_n,--warp-count::n Number of warps within threadblock along the N dimension [int] --warps_k,--warp-count::k Number of warps within threadblock along the K dimension [int] --inst_m,--instruction-shape::m Math instruction shape in the M dimension [int] --inst_n,--instruction-shape::n Math instruction shape in the N dimension [int] --inst_k,--instruction-shape::k Math instruction shape in the K dimension [int] --min_cc,--minimum-compute-capability Minimum device compute capability [int] --max_cc,--maximum-compute-capability Maximum device compute capability Examples: Profile a particular convolution (specify all the convolution parameters): $ cutlass_profiler --operation=Conv2d --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32 --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3 --pad_h=1 --pad_w=1 --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 ``` ## Example CUDA Core Convolution Operation Example command line for profiling forward propagation convolution kernels on CUDA cores is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=simt_sfprop --verification-providers=device --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: conv2d Operation: cutlass_simt_sfprop_optimized_128x128_8x2_nhwc Status: Success Verification: ON Disposition: Passed reference_device: Passed Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f32:nhwc --Filter=f32:nhwc --Output=f32:nhwc \ --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ --eq_gemm_provider=none --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 Bytes: 2055798784 bytes FLOPs: 118482796544 flops Runtime: 8.13237 ms Memory: 235.431 GiB/s Math: 14569.3 GFLOP/s ``` ## Example Tensor Core Convolution Operation Example command line for profiling forward propagation convolution kernels runing on Tensor Cores is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=tensorop*fprop --verification-providers=device --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: conv2d Operation: cutlass_tensorop_s16816fprop_optimized_f16_128x128_64x4_nhwc Status: Success Verification: ON Disposition: Passed reference_device: Passed Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f16:nhwc --Filter=f16:nhwc --Output=f32:nhwc \ --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ --eq_gemm_provider=none --op_class=tensorop --accum=f32 --cta_m=128 --cta_n=128 --cta_k=64 --stages=4 \ --warps_m=2 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024 Bytes: 
1130659840 bytes FLOPs: 118482796544 flops Runtime: 0.945071 ms Memory: 1114.21 GiB/s Math: 125369 GFLOP/s ``` # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Load nodes and implementations """ import ctypes from cutlass.backend.c_types import tuple_factory from cutlass.backend.epilogue import dtype2ctype, to_ctype_value from cutlass.backend.evt.ir.node import NodeBase, ImplBase class LoadImplBase(ImplBase): """ Base class for load node implementations """ reserved_names = ["accum", "C"] def __init__(self, node) -> None: super().__init__(node) self.element = node.element self.element_output = node.element_output self.stride = node.tensor.stride class AccumulatorImpl(LoadImplBase): """ Accumulator node implementation """ @staticmethod def match(node, problem_size: tuple): return node.name == "accum" and node.tensor.shape == problem_size class LoadSrcImpl(LoadImplBase): """ Load C implementation """ @property def name_camel(self) -> str: return "TensorC" @property def argument_type_c(self): stride_mnl = self.get_stride_mnl() tuple_type = tuple_factory(stride_mnl, self.stride_dtype) class _Argument(ctypes.Structure): _fields_ = [ ("ptr_C", ctypes.c_void_p), ("stride_C", tuple_type) ] def __init__(self, ptr) -> None: self.ptr_C = ptr self.stride_C = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): return node.name == "C" and node.tensor.shape == problem_size class AuxLoadImpl(LoadImplBase): """ Load arbitrary tensor """ @property def argument_type(self): stride_mnl = self.get_stride_mnl() name = self.name tuple_type = tuple_factory(stride_mnl, self.stride_dtype) element_type = self.element class _Argument(ctypes.Structure): _fields_ = [ ("ptr_aux", ctypes.c_void_p), ("null_default", dtype2ctype[element_type]), ("dAux", tuple_type) ] def __init__(self, kwargs) -> None: ptr = kwargs[name] self.ptr_aux = ptr self.null_default = to_ctype_value(0, 
element_type) self.dAux = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): if node.name in LoadImplBase.reserved_names: return False strideMN = node.tensor.stride[-2:] if (strideMN[0] == 1 and strideMN[1] != 0 or strideMN[0] != 0 and strideMN[1] == 1 ): return True else: return False class RowBroadcastImpl(LoadImplBase): """ Broadcast a row vector """ def __init__(self, node) -> None: super().__init__(node) self.stride_dtype = "int" @property def argument_type(self): stride_mnl = self.get_stride_mnl() name = self.name tuple_type = tuple_factory(stride_mnl, self.stride_dtype) element_type = self.element class _Argument(ctypes.Structure): _fields_ = [ ("ptr_row", ctypes.c_void_p), ("null_default", dtype2ctype[element_type]), ("dRow", tuple_type) ] def __init__(self, kwargs) -> None: ptr = kwargs[name] self.ptr_row = ptr self.null_default = to_ctype_value(0, element_type) self.dRow = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): if node.name in LoadImplBase.reserved_names: return False strideMN = node.tensor.stride[-2:] if strideMN == (0, 1): return True else: return False class ColumnBroadcastImpl(LoadImplBase): """ Broadcast a column vector """ def __init__(self, node) -> None: super().__init__(node) self.stride_dtype = "int" @property def argument_type(self): stride_mnl = self.get_stride_mnl() name = self.name tuple_type = tuple_factory(stride_mnl, self.stride_dtype) element_type = self.element class _Argument(ctypes.Structure): _fields_ = [ ("ptr_col", ctypes.c_void_p), ("null_default", dtype2ctype[element_type]), ("dCol", tuple_type) ] def __init__(self, kwargs) -> None: ptr = kwargs[name] self.ptr_col = int(ptr) self.null_default = to_ctype_value(0, element_type) self.dCol = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): if node.name in LoadImplBase.reserved_names: return False strideMN = node.tensor.stride[-2:] if strideMN == (1, 0): return True else: return False class ScalarBroadcastImpl(LoadImplBase): """ Broadcast a scalar """ def __init__(self, node) -> None: super().__init__(node) self.stride_dtype = "int" @property def argument_type(self): stride_mnl = self.get_stride_mnl() name = self.name tuple_type = tuple_factory(stride_mnl, self.stride_dtype) element_type = self.element if self.tensor.is_constant: value = self.tensor.value class _Argument(ctypes.Structure): _fields_ = [ ("scalars", dtype2ctype[element_type]), ("scalar_ptrs", ctypes.c_void_p), ("dScalar", tuple_type) ] def __init__(self, kwargs) -> None: self.scalars = to_ctype_value(value, element_type) self.scalar_ptrs = 0 self.dScalar = tuple_type(stride_mnl) else: class _Argument(ctypes.Structure): _fields_ = [ ("scalars", dtype2ctype[element_type]), ("scalar_ptrs", ctypes.c_void_p), ("dScalar", tuple_type) ] def __init__(self, kwargs) -> None: scalar_or_ptr = kwargs[name] if isinstance(scalar_or_ptr, float): self.scalars = to_ctype_value(scalar_or_ptr, element_type) self.scalar_ptrs = 0 else: self.scalar_ptrs = int(scalar_or_ptr) self.dScalar = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): if node.name in LoadImplBase.reserved_names: return False strideMN = node.tensor.stride[-2:] if strideMN == (0, 0): return True else: return False class LoadNode(NodeBase): """ Load Node """ cnt = 0 possible_impls = [ AccumulatorImpl, LoadSrcImpl, AuxLoadImpl, RowBroadcastImpl, ColumnBroadcastImpl, ScalarBroadcastImpl ] def __init__(self, name: str) -> None: 
if name is None: name = f"load{LoadNode.cnt}" LoadNode.cnt += 1 super().__init__(name) self.op = "load" def type_propagation(self, *args, **kwargs): """ Load node loads tensor under type `tensor.element` and returns an array of type `tensor.element`. """ if self.tensor is None: raise RuntimeError(f"The tensor of node {self.name} is unknown.") self.element = self.tensor.element self.element_output = self.tensor.element
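# ---------------------------------------------------------------------------
# Illustrative sketch only (not used by the cutlass package): a standalone
# mirror of the stride-pattern dispatch that the match() methods above
# implement. The helper and its arguments are hypothetical; edge cases and the
# exact matching order are simplified. It documents the convention that the
# last two stride entries (M, N) of a load node's tensor decide which
# implementation is selected.
# ---------------------------------------------------------------------------
def _classify_load_example(name: str, shape: tuple, stride: tuple, problem_size: tuple) -> str:
    if name == "accum" and shape == problem_size:
        return "AccumulatorImpl"        # accumulator produced by the mainloop
    if name == "C" and shape == problem_size:
        return "LoadSrcImpl"            # the C source operand
    stride_mn = tuple(stride[-2:])
    if stride_mn == (0, 1):
        return "RowBroadcastImpl"       # row vector broadcast down the M mode
    if stride_mn == (1, 0):
        return "ColumnBroadcastImpl"    # column vector broadcast across the N mode
    if stride_mn == (0, 0):
        return "ScalarBroadcastImpl"    # single scalar broadcast to every element
    return "AuxLoadImpl"                # full auxiliary tensor with a genuine MN stride


# Example: a bias row vector with stride (0, 1) is matched as a row broadcast:
#   _classify_load_example("bias", (1, 512), (0, 1), (512, 512)) == "RowBroadcastImpl"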
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Classes containing valid operations for a given compute capability and data types. 
""" from itertools import combinations_with_replacement import logging from cuda import __version__ import cutlass_library from cutlass_library.library import ConvKind, IteratorAlgorithm, StrideSupport, GroupMode import cutlass from cutlass.utils.check import valid_stage_count from cutlass.utils.datatypes import td_from_profiler_td, td_from_profiler_op _generator_ccs = [50, 60, 61, 70, 75, 80, 90] # Strip any additional information from the CUDA version _cuda_version = __version__.split("rc")[0] class KernelsForDataType: """ Container class for keeping track of kernels that correspond to a particular combination of data types for operands A, B, and accumulator """ def __init__(self, datatype_comb: tuple, layout_comb: tuple): self.datatype_comb = datatype_comb self.layout_comb = layout_comb self.math_operations = set() # Dictionary mapping from alignment (int) to a list of kernels that fit the alignment # constraint for the data type combination self.kernels_by_alignment = {} def add(self, operation): """ Add an operation to the list of supported kernels """ alignment_key = f"{operation.A.alignment} {operation.B.alignment} {operation.C.alignment}" if alignment_key not in self.kernels_by_alignment: self.kernels_by_alignment[alignment_key] = [] self.kernels_by_alignment[alignment_key].append(operation) self.math_operations.add(operation.tile_description.math_instruction.math_operation) def alignments(self, operand: str): """ Returns an unsorted list of alignments supported by this data type combination :param operand: identifier of operand in question (e.g., A, B, C) :type operand: str :return: unsorted list of alignments supported by this data type combination :rtype: list """ operand_idx = self._operand_idx(operand) return [int(key.split(" ")[operand_idx]) for key in self.kernels_by_alignment.keys()] @property def all_operations(self): """ Returns a list of all operations supported by this data type combination :return: list of all operations supported by this data type combination :rtype: list """ ops = [] for _, alignment_ops in self.kernels_by_alignment.items(): ops.extend(alignment_ops) return ops def default_operation(self, math_operation: cutlass.MathOperation): key = sorted(list(self.kernels_by_alignment.keys()))[0] kernels = self.kernels_by_alignment[key] if math_operation is not None: kernels = [x for x in kernels if x.tile_description.math_instruction.math_operation == math_operation] return kernels[0] def operations(self, alignment_A: int, alignment_B: int, alignment_C: int, math_operation: cutlass.MathOperation): """ Returns operations satisfying the alignment constraints :param alignment_A: alignment constraint of operations to return :type alignment_A: int :param alignment_B: alignment constraint of operations to return :type alignment_B: int :param alignment_C: alignment constraint of operations to return :type alignment_C: int :param math_operation: math operation to consider :type math_operation: cutlass.MathOperation :return: list of operations :rtype: list """ key = f"{alignment_A} {alignment_B} {alignment_C}" if key not in self.kernels_by_alignment: og_key = key # Reconcile A, B, and C alignments by trying to align to the minimum min_alignment = min(alignment_A, alignment_B, alignment_C) key = f"{min_alignment} {min_alignment} {min_alignment}" if key not in self.kernels_by_alignment: # Finally, go through all available alignment combinations and find # one for which all values are less than those passed in. 
key = None alignments = sorted([tuple(int(x) for x in k.split(" ")) for k in self.kernels_by_alignment.keys()], reverse=True) for align_A, align_B, align_C in alignments: if alignment_A % align_A == 0 and alignment_B % align_B == 0 and alignment_C % align_C == 0: key = f"{align_A} {align_B} {align_C}" break if key is None: raise Exception( f"No operations of alignment {og_key} found for data type and layout " f"combination {self.datatype_comb} {self.layout_comb}. Compatible alignments " f"are {self.kernels_by_alignment.keys()}" ) ops = self.kernels_by_alignment[key] if math_operation is not None: ops = [op for op in ops if op.tile_description.math_instruction.math_operation == math_operation] return ops def _operand_idx(self, key: str) -> int: operand_list = ["A", "B", "C"] if key not in operand_list: raise Exception(f"Unexpected operand {operand}") return operand_list.index(key) def find_alignment(self, shape: tuple, layout: cutlass.LayoutType, operand=str) -> int: """ Returns the most preferable alignment for a given shape and layout :param shape: extent of each dimension of the tensor :type shape: tuple :param layout: layout of the tensor :type layout: cutlass.LayoutType :param operand: descriptor of the operand in question :type operand: str :return: maximum alignment supported by the data type combination and tensor size :rtype: int """ operand_idx = self._operand_idx(operand) # Determine the leading dimension of the shape if layout == cutlass.LayoutType.ColumnMajor: ld = shape[-2] elif layout == cutlass.LayoutType.RowMajor: ld = shape[-1] elif layout == cutlass.LayoutType.TensorNHWC: ld = shape[-1] else: raise Exception(f"Unexpected or unsupported layout {layout}") for alignments in sorted(list(self.kernels_by_alignment.keys()), reverse=True): alignment = int(alignments.split(" ")[operand_idx]) if ld % alignment == 0: return alignment # Default to alignment of 1 if no others match return 1 def sort(self): """ Sorts each list of kernels in `kernels_by_alignment` in descending order of threadblock shape """ key = lambda op: ( op.tile_description.threadblock_shape[0] * op.tile_description.threadblock_shape[1] * op.tile_description.threadblock_shape[2] ) for alignment in self.kernels_by_alignment.keys(): self.kernels_by_alignment[alignment].sort(key=key, reverse=True) def supports_math_operation(self, math_operation: cutlass.MathOperation) -> bool: """ Returns whether `math_operation` is supported by at least one operation. 
:param math_operation: math operation to consider :type math_operation: cutlass.MathOperation :return: whether math_operation is supported by at least one operation :rtype: bool """ return math_operation is None or math_operation in self.math_operations class ArchOptions: """ Structure for keeping track of kernels available on a given compute capability :param target_cc: compute capability of the device on which kernels will be run :type target_cc: int :param kernel_cc: compute capability of the kernels to generate :type kernel_cc: int :param operation_kind: type of operation to register :type operation_kind: cutlass_library.OperationKind :param gemm_kinds: types of GEMM operations that can be included :type gemm_kinds: list :param allowed_math_operations: types of primitive math operations allowed :type allowed_math_operations: list """ def __init__( self, target_cc: int, kernel_cc: int, operation_kind: cutlass_library.OperationKind, gemm_kinds: list, allowed_math_operations: list = [ cutlass_library.MathOperation.multiply_add, cutlass_library.MathOperation.multiply_add_saturate, cutlass_library.MathOperation.multiply_add_mixed_input_upcast, cutlass_library.MathOperation.multiply_add_fast_f32 ] ): self.cc = kernel_cc # Dictionary with following structure: # Key: OpcodeClass # Value: Dictionary with the following structure: # Key: tuple of ((DataType, DataType, DataType), (LayoutType, LayoutType, LayoutType), # representing ((element_a, element_b, element_accumulator), (layout_a, layout_b)) # Value: KernelsForDataType self.operations_by_opclass = {} self.op_class = None self.allowed_math_operations = allowed_math_operations # Identify the method within CUTLASS generator script that generates kernel # descriptions for the target CC generate_function_name = "GenerateSM" + str(kernel_cc) if not hasattr(cutlass_library.generator, generate_function_name): cutlass.logger.warning(f"No generator found for architecture {kernel_cc}") return generate_function = getattr(cutlass_library.generator, generate_function_name) # Initialize a default manifest and populate it with valid kernel descriptions # for the target CC args = [ "--kernels=all", f"--log-level={logging.getLevelName(cutlass.logger.level)}" ] manifest_args = cutlass_library.generator.define_parser().parse_args(args) manifest = cutlass_library.manifest.Manifest(manifest_args) generate_function(manifest, _cuda_version) if operation_kind not in manifest.operations: # No kernels generated for this architecture, this could be because the CUDA # toolkit is insufficient to support operations in this CC cutlass.logger.warning(f"No operations of type {operation_kind} found for CC {kernel_cc}") return # Only one CC should be returned, given the setup above of calling only the generation scripts # for a given CC if len(manifest.operations[operation_kind].keys()) != 1 or kernel_cc not in manifest.operations[operation_kind]: raise Exception(f"Error finding kernels for SM{kernel_cc}. 
Check that your CUDA toolkit version " "is sufficient for the architecture in question.") # Iterate through the available operations for this operation kind and # find available opclasses and data types for name, op_list in manifest.operations[operation_kind][kernel_cc].items(): for op in op_list: if operation_kind == cutlass_library.OperationKind.Gemm: if op.gemm_kind not in gemm_kinds: continue mi = op.tile_description.math_instruction if mi.math_operation not in self.allowed_math_operations: continue # Prune operations that don't fit in shared memory td = td_from_profiler_op(op) if not valid_stage_count(target_cc, kernel_cc, td, verbose=False)[0]: continue if mi.opcode_class not in self.operations_by_opclass: self.operations_by_opclass[mi.opcode_class] = {} datatype_comb = (mi.element_a, mi.element_b, mi.element_accumulator) layout_comb = (op.A.layout, op.B.layout) # Register TF32 kernels as F32 to enable F32 -> TF32 conversion + TF32 Tensor Core operations if datatype_comb == (cutlass_library.DataType.tf32, cutlass_library.DataType.tf32, cutlass_library.DataType.f32): # TF32 kernels only supported on SM80 and beyond if self.cc < 80: continue elif self.cc == 90: if (op.A.element != cutlass_library.DataType.f32 or op.B.element != cutlass_library.DataType.f32 or op.C.element != cutlass_library.DataType.f32): continue datatype_comb = (cutlass_library.DataType.f32, cutlass_library.DataType.f32, cutlass_library.DataType.f32) opclass_dict = self.operations_by_opclass[mi.opcode_class] key = (datatype_comb, layout_comb) if key not in opclass_dict: opclass_dict[key] = KernelsForDataType(datatype_comb, layout_comb) opclass_dict[key].add(op) # Set the default opclass to TensorOp, if available. Otherwise default to SIMT if cutlass_library.OpcodeClass.TensorOp in self.operations_by_opclass: self.op_class = cutlass_library.OpcodeClass.TensorOp else: self.op_class = cutlass_library.OpcodeClass.Simt # The profiler's generator may generate only a limited set of combinations of operands for SIMT kernels. # Here, we generate additional versions via a generic TileDescription. 
if cutlass_library.OpcodeClass.Simt not in self.operations_by_opclass: self.operations_by_opclass[cutlass_library.OpcodeClass.Simt] = {} if operation_kind == cutlass_library.OperationKind.Gemm: types = [ (cutlass_library.DataType.s8, cutlass_library.DataType.s8, cutlass_library.DataType.s8), (cutlass_library.DataType.s8, cutlass_library.DataType.s8, cutlass_library.DataType.s32), (cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f16), (cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f32), (cutlass_library.DataType.f32, cutlass_library.DataType.f32, cutlass_library.DataType.f32), (cutlass_library.DataType.f64, cutlass_library.DataType.f64, cutlass_library.DataType.f64), ] # Add FP8 A/B/C fp8_types = [cutlass_library.DataType.e4m3, cutlass_library.DataType.e5m2] for type_comb in combinations_with_replacement(fp8_types, 3): types.append(type_comb) # Add FP8 A/B with FP32 C for type_comb in combinations_with_replacement(fp8_types, 2): types.append(type_comb + (cutlass.DataType.f32,)) layouts = [ (cutlass_library.LayoutType.RowMajor, cutlass_library.LayoutType.RowMajor), (cutlass_library.LayoutType.RowMajor, cutlass_library.LayoutType.ColumnMajor), (cutlass_library.LayoutType.ColumnMajor, cutlass_library.LayoutType.RowMajor), (cutlass_library.LayoutType.ColumnMajor, cutlass_library.LayoutType.ColumnMajor), ] elif operation_kind == cutlass_library.OperationKind.Conv2d: types = [ (cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f16), (cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f32), (cutlass_library.DataType.f32, cutlass_library.DataType.f32, cutlass_library.DataType.f32), (cutlass_library.DataType.f64, cutlass_library.DataType.f64, cutlass_library.DataType.f64), ] layouts = [ (cutlass_library.LayoutType.TensorNHWC, cutlass_library.LayoutType.TensorNHWC), ] else: raise NotImplementedError(f"Operation kind {operation_kind} is currently unsupported.") alignment = 1 epilogue_functor = cutlass_library.EpilogueFunctor.LinearCombination swizzling_functor = cutlass_library.SwizzlingFunctor.Identity8 for type_comb in types: for layout_comb in layouts: comb = (type_comb, layout_comb) if comb in self.operations_by_opclass[cutlass_library.OpcodeClass.Simt]: continue A = cutlass_library.TensorDescription(type_comb[0], layout_comb[0], alignment) B = cutlass_library.TensorDescription(type_comb[1], layout_comb[1], alignment) C = cutlass_library.TensorDescription(type_comb[2], cutlass_library.LayoutType.ColumnMajor, alignment) math_inst = cutlass_library.MathInstruction( [1, 1, 1], type_comb[0], type_comb[1], type_comb[2], cutlass_library.OpcodeClass.Simt, cutlass_library.MathOperation.multiply_add ) td = cutlass_library.TileDescription( [128, 128, 8], 2, [4, 2, 1], math_inst, 50, 1024) # Prune operations that don't fit in shared memory if not valid_stage_count(target_cc, kernel_cc, td_from_profiler_td(td), verbose=False)[0]: continue new_kernels = KernelsForDataType(type_comb, layout_comb) if operation_kind == cutlass_library.OperationKind.Gemm: new_operation = cutlass_library.manifest.GemmOperation( cutlass_library.GemmKind.Universal, td.minimum_compute_capability, td, A, B, C, type_comb[2], epilogue_functor, swizzling_functor) new_kernels.add(new_operation) elif operation_kind == cutlass_library.OperationKind.Conv2d: for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]: new_operation = cutlass_library.manifest.Conv2dOperation( conv_kind, 
IteratorAlgorithm.Analytic, td.minimum_compute_capability, td, A, B, C, type_comb[2], StrideSupport.Strided, epilogue_functor, swizzling_functor, group_mode=GroupMode.SingleGroup ) new_kernels.add(new_operation) self.operations_by_opclass[cutlass_library.OpcodeClass.Simt][comb] = new_kernels # Sort all operations for oc in self.operations_by_opclass.keys(): for comb in self.operations_by_opclass[oc].keys(): self.operations_by_opclass[oc][comb].sort() def opclass_supports_combination( self, op_class: cutlass_library.OpcodeClass, datatype_comb: tuple, layout_comb: tuple, math_operation: cutlass_library.MathOperation ) -> bool: """ Returns whether the provided operation class supports the provided data type and layout combination :param op_class: operation class to consider :type op_class: cutlass_library.OpcodeClass :param datatype_comb: tuple of data types for (element_A, element_B, element_accumulator) :type datatype_comb: tuple[cutlass_library.DataType] :param layout_comb: tuple of data types for (layout_A, layout_B) :type layout_comb: tuple[cutlass_library.LayoutType] :param math_operation: math operation to consider or None if any can be considered :type math_operation: cutlass.MathOperation :return: set of operation classes that support the provided data type and layout combination :rtype: set """ if op_class not in self.operations_by_opclass: raise Exception(f"Unexpected or unsupported operation class {op_class}") if operations := self.operations_by_opclass[op_class].get((datatype_comb, layout_comb)): if math_operation is not None: return operations.supports_math_operation(math_operation) else: return True return False def supporting_opclasses( self, element_a: cutlass_library.DataType, element_b: cutlass_library.DataType, element_accumulator: cutlass_library.DataType, layout_a: cutlass_library.LayoutType, layout_b: cutlass_library.LayoutType, math_operation: cutlass_library.MathOperation, ) -> set: """ Returns a set of operation classes that support the provided data type combination :param element_a: data type of operand A :type element_a: cutlass_library.DataType :param element_b: data type of operand B :type element_b: cutlass_library.DataType :param element_accumulator: data type of accumulator :type element_accumulator: cutlass_library.DataType :param layout_a: layout of operand A :type layout_a: cutlass_library.LayoutType :param layout_b: layout of operand B :type layout_b: cutlass_library.LayoutType :param math_operation: math operation to consider :type math_operation: cutlass.MathOperation :return: set of operation classes that support the provided data type combination :rtype: set """ supporting_op_classes = set() datatype_comb = (element_a, element_b, element_accumulator) layout_comb = (layout_a, layout_b) for op_class in self.operations_by_opclass.keys(): if self.opclass_supports_combination(op_class, datatype_comb, layout_comb, math_operation): supporting_op_classes.add(op_class) return supporting_op_classes def operations( self, op_class: cutlass_library.OpcodeClass, element_a: cutlass_library.DataType, element_b: cutlass_library.DataType, element_accumulator: cutlass_library.DataType, layout_a: cutlass_library.LayoutType, layout_b: cutlass_library.LayoutType, math_operation: cutlass_library.MathOperation, ) -> KernelsForDataType: """ Returns whether the provided operation class supports the provided data type combination :param op_class: operation class to consider :type op_class: cutlass_library.OpcodeClass :param element_a: data type of operand A :type element_a: 
cutlass_library.DataType :param element_b: data type of operand B :type element_b: cutlass_library.DataType :param element_accumulator: data type of accumulator :type element_accumulator: cutlass_library.DataType :param layout_a: layout of operand A :type layout_a: cutlass_library.LayoutType :param layout_b: layout of operand B :type layout_b: cutlass_library.LayoutType :param math_operation: math operation to consider :type math_operation: cutlass.MathOperation :return: container of kernels by alignment supported by the provided combination of parameters :rtype: KernelsForDataType """ datatype_comb = (element_a, element_b, element_accumulator) layout_comb = (layout_a, layout_b) if not self.opclass_supports_combination(op_class, datatype_comb, layout_comb, math_operation): raise Exception( f"Data type layout combination {datatype_comb}, {layout_comb} " f"is not supported by opcode class {op_class} on CC {self.cc}." ) return self.operations_by_opclass[op_class][(datatype_comb, layout_comb)] class OptionRegistry: """ Container of all architecture-specific options :param target_cc: compute capability of the device on which operations will be run :type target_cc: int """ def __init__(self, target_cc: int): self.registry = {} gemm_kinds = [cutlass_library.GemmKind.Universal, cutlass_library.GemmKind.Universal3x] operation_kinds = [cutlass_library.OperationKind.Gemm, cutlass_library.OperationKind.Conv2d] # Construct options for each CC for kernel_cc in _generator_ccs: self.registry[kernel_cc] = {} for opkind in operation_kinds: self.registry[kernel_cc][opkind] = ArchOptions(target_cc, kernel_cc, opkind, gemm_kinds) def options_for_cc(self, cc: int, op_kind=cutlass_library.OperationKind.Gemm) -> ArchOptions: return self.registry.get(cc, None)[op_kind]
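
# Illustrative sketch (not part of library_defaults.py): how the classes above are typically
# queried together. The compute capability, data types, layouts, and problem shape below are
# assumptions chosen only for this example; the class and method names are the ones defined
# in this module, and the import path is assumed from this file's location in the package.
import cutlass_library
from cutlass.library_defaults import OptionRegistry

registry = OptionRegistry(target_cc=80)
opts = registry.options_for_cc(80, cutlass_library.OperationKind.Gemm)

# Which operation classes (TensorOp, Simt, ...) can serve an f16 x f16 -> f32 GEMM with
# row-major A and column-major B? Passing None leaves the math operation unconstrained.
opclasses = opts.supporting_opclasses(
    cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f32,
    cutlass_library.LayoutType.RowMajor, cutlass_library.LayoutType.ColumnMajor, None)

# Fetch the kernels for one op class, pick an alignment that divides the leading dimension
# of A, and narrow the candidate list to kernels supporting that alignment.
kernels = opts.operations(
    cutlass_library.OpcodeClass.TensorOp,
    cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f32,
    cutlass_library.LayoutType.RowMajor, cutlass_library.LayoutType.ColumnMajor, None)
align_A = kernels.find_alignment((1024, 512), cutlass_library.LayoutType.RowMajor, operand="A")
candidates = kernels.operations(align_A, align_A, align_A, None)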
# --- End of file: cutlass/python/cutlass/library_defaults.py (repo: cutlass) ---
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utilities for emitting GEMM kernels """ import collections import enum import functools import logging import operator import os.path import shutil try: import builtins if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True: raise ImportError("Disabling attempt to import cutlass_library") from cutlass_library.library import * except ImportError: from library import * _LOGGER = logging.getLogger(__name__) ################################################################################################### # # Data structure modeling a GEMM operation # ################################################################################################### # class GemmOperation: # def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, D = None, kernel_schedule = KernelScheduleType.ScheduleAuto, epilogue_schedule = EpilogueScheduleType.ScheduleAuto, tile_scheduler = TileSchedulerType.Default ): kinds_3x = { GemmKind.Universal3x, GemmKind.SparseUniversal3x, } self.is_3x = gemm_kind in kinds_3x self.prefix = "3x" if self.is_3x else "" self.operation_kind = OperationKind.Gemm self.arch = arch self.tile_description = tile_description self.gemm_kind = gemm_kind self.A = A self.B = B self.C = C self.D = D if self.D == None: self.D = self.C if not self.is_3x: assert(kernel_schedule == KernelScheduleType.ScheduleAuto) assert(epilogue_schedule == EpilogueScheduleType.ScheduleAuto) self.kernel_schedule = kernel_schedule self.epilogue_schedule = epilogue_schedule self.element_epilogue = element_epilogue self.epilogue_functor = epilogue_functor if self.is_3x and epilogue_functor == 
EpilogueFunctor.LinearCombination: self.epilogue_functor = EpilogueFunctor3x.LinearCombination self.swizzling_functor = swizzling_functor self.tile_scheduler = tile_scheduler # def is_complex(self): complex_operators = [ MathOperation.multiply_add_complex, MathOperation.multiply_add_complex_gaussian, MathOperation.multiply_add_complex_fast_f32 ] return self.tile_description.math_instruction.math_operation in complex_operators # def is_mixed_input(self): return self.A.element != self.B.element # def is_planar_complex(self): return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray) # def accumulator_type(self): accum = self.tile_description.math_instruction.element_accumulator if self.is_complex(): return get_complex_from_real(accum) return accum # def short_math_name(self): if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian: return "g%s" % ShortDataTypeNames[self.accumulator_type()] return ShortDataTypeNames[self.accumulator_type()] # def core_name(self): ''' The basic operation kind is prefixed with a letter indicating the accumulation type. ''' inst_shape = '' inst_operation = '' intermediate_type = '' math_operations_map = { MathOperation.xor_popc: 'xor', MathOperation.and_popc: 'and', MathOperation.multiply_add_fast_accum: 'fastaccum', } tensor_ops = [ OpcodeClass.TensorOp, OpcodeClass.WmmaTensorOp, OpcodeClass.SparseTensorOp, ] is_tensor_op = self.tile_description.math_instruction.opcode_class in tensor_ops if is_tensor_op: math_op = self.tile_description.math_instruction.math_operation math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else '' if self.is_3x: inst_shape = "{0}x{1}x{2}".format(*tuple(self.tile_description.math_instruction.instruction_shape)) else: inst_shape = "{0}{1}{2}".format(*tuple(self.tile_description.math_instruction.instruction_shape)) inst_shape += math_op_string if self.tile_description.math_instruction.element_a != self.A.element and \ self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind]) # Generates a string representing the MMA instruction. def extended_name(self): ''' Append data types if they differ from compute type. ''' if self.is_complex(): extended_name = "${core_name}" else: if self.C.element != self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${element_c}_${core_name}_${element_a}" if self.is_mixed_input(): extended_name += "_${element_b}" elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${core_name}_${element_a}" if self.is_mixed_input(): extended_name += "_${element_b}" else: extended_name = "${core_name}" extended_name = SubstituteTemplate(extended_name, { 'element_a': DataTypeNames[self.A.element], 'element_b': DataTypeNames[self.B.element], 'element_c': DataTypeNames[self.C.element], 'core_name': self.core_name() }) return extended_name def extended_name_3x(self): '''Generates a string representing the MMA atom. 
Assumes accumulator type is C type.''' extended_name = "{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format( element_a = DataTypeNames[self.A.element], element_b = DataTypeNames[self.B.element], element_acc = DataTypeNames[self.accumulator_type()], element_c = DataTypeNames[self.C.element], element_d = DataTypeNames[self.D.element], core_name = self.core_name()) return extended_name def datatype_name_3x(self): '''Generates a string representing the MMA atom. Assumes accumulator type is C type.''' datatype_name = "{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format( element_a = DataTypeNames[self.A.element], element_b = DataTypeNames[self.B.element], element_acc = DataTypeNames[self.accumulator_type()], element_c = DataTypeNames[self.C.element], element_d = DataTypeNames[self.D.element]) return datatype_name # Generates a short string representing the AB layout tags (e.g. nt or tn) def layout_name(self): if self.is_complex() or self.is_planar_complex(): return "%s%s" % ( ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)] ) return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout]) # Generates a short string representing the ABC layout tags (e.g. ntn or tnn) def layout_name_3x(self): if self.is_complex() or self.is_planar_complex(): return "{}{}{}".format( ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)], ShortComplexLayoutNames[(self.C.layout, self.C.complex_transform)]) else: return "{}{}{}".format( ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout], ShortLayoutTypeNames[self.C.layout]) # Generates a short string representing underlying kernel schedule type def kernel_schedule_name_3x(self): return KernelScheduleSuffixes[self.kernel_schedule] # Generates a short string representing underlying epilogue schedule type def epilogue_schedule_name_3x(self): return EpilogueScheduleSuffixes[self.epilogue_schedule] # Generate a short string representing the operation class def opcode_class_name(self): return OpcodeClassNames[self.tile_description.math_instruction.opcode_class] # Generates the full kernel function name def procedural_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] if self.arch >= 90: kernel_name_template = "cutlass{p}_sm{ar}_{op}_{ex}{ct}{cs}_{l}_{s}_align{al}{t}{k}{e}" return kernel_name_template.format( p = self.prefix, ar = self.arch, op = opcode_class_name, ex = self.extended_name_3x(), ct = '_' + 'x'.join([str(i) for i in self.tile_description.tile_shape]) if self.tile_description.tile_shape[0] > 0 else "", cs = '_' + 'x'.join([str(i) for i in self.tile_description.cluster_shape]), l = self.tile_description.stages, s = self.layout_name_3x(), al = str(max(self.A.alignment, self.B.alignment)), t = TileSchedulerSuffixes[self.tile_scheduler], k = self.kernel_schedule_name_3x(), e = self.epilogue_schedule_name_3x()) else: threadblock = self.tile_description.procedural_name() return "cutlass{p}_{op}_{ex}_{tb}_{l}_align{a}".format( p = self.prefix, op = opcode_class_name, ex = self.extended_name(), tb = threadblock, l = self.layout_name(), a = str(max(self.A.alignment, self.B.alignment))) # def configuration_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' return self.procedural_name() def __hash__(self): return hash(self.configuration_name()) def __eq__(self, other): return self.configuration_name() == other.configuration_name() ################################################################################################### # # Data structure modeling a grouped GEMM operation # ################################################################################################### # class GroupedGemmOperation(GemmOperation): # def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \ scheduler_mode = GroupScheduleMode.Device): super().__init__(gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ epilogue_functor, swizzling_functor) self.scheduler_mode = scheduler_mode # def procedural_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' base = super().procedural_name() return SubstituteTemplate( base + "_schedule${schedule}", { 'schedule': ShortGroupScheduleModeNames[self.scheduler_mode] }) ################################################################################################### # # Emits single instances of a CUTLASS device-wide operator # ################################################################################################### # class EmitGemmInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [] self.gemm_template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = cutlass::gemm::device::Gemm< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, ${stages}, ${align_a}, ${align_b}, false, ${math_operation} ${residual} >; """ self.gemm_complex_template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = cutlass::gemm::device::GemmComplex< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, ${stages}, ${transform_a}, ${transform_b}, ${math_operation} ${residual} >; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) residual = '' values = { 'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': 
str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'residual': residual } template = self.gemm_complex_template if operation.is_complex() else self.gemm_template return SubstituteTemplate(template, values) ################################################################################################### class EmitSparseGemmInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [] self.gemm_template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = cutlass::gemm::device::SparseGemm< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, ${stages}, ${align_a}, ${align_b}, false, ${math_operation} ${residual} >; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) residual = '' values = { 'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': 
str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'residual': residual } template = self.gemm_template return SubstituteTemplate(template, values) ################################################################################################### # class EmitGemmUniversalInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [ "cutlass/cutlass.h", "cutlass/numeric_types.h", "cutlass/arch/arch.h", "cutlass/arch/mma.h", "cutlass/layout/matrix.h", "cutlass/gemm/device/gemm.h", "cutlass/gemm/device/gemm_universal_adapter.h", "cutlass/gemm/kernel/default_gemm_universal.h", ] self.builtin_epilogue_functor_template = """ ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} > """ self.gemm_template = """ // Gemm operator ${operation_name} using ${operation_name}_base = typename cutlass::gemm::kernel::DefaultGemmUniversal< ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, // transposed B operand ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, // transposed A operand ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}, ${swizzling_functor}, ${stages}, ${math_operation} >::GemmKernel; // Define named type struct ${operation_name}${operation_suffix} : public ${operation_name}_base { }; """ self.gemm_template_interleaved = """ // Gemm operator ${operation_name} using ${operation_name}_base = typename cutlass::gemm::kernel::DefaultGemmUniversal< ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}, ${swizzling_functor}, ${stages}, ${math_operation} >::GemmKernel; // Define named type struct ${operation_name}${operation_suffix} : public ${operation_name}_base { }; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}< cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> >("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): threadblock_shape = operation.tile_description.threadblock_shape 
warp_count = operation.tile_description.warp_count warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] transpose_layouts = { LayoutType.ColumnMajor: LayoutType.RowMajor, LayoutType.RowMajor: LayoutType.ColumnMajor } if operation.A.layout in transpose_layouts.keys() and \ operation.B.layout in transpose_layouts.keys() and \ operation.C.layout in transpose_layouts.keys(): instance_layout_A = transpose_layouts[operation.A.layout] instance_layout_B = transpose_layouts[operation.B.layout] instance_layout_C = transpose_layouts[operation.C.layout] gemm_template = self.gemm_template else: instance_layout_A, instance_layout_B, instance_layout_C = \ (operation.A.layout, operation.B.layout, operation.C.layout) gemm_template = self.gemm_template_interleaved # # Support built-in epilogue functors or user-defined functions if isinstance(operation.epilogue_functor, enum.Enum): epilogue_vector_length = \ min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element] values = { 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], } epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) else: epilogue_functor = self.epilogue_functor.emit_declaration() # values = { 'operation_name': operation.procedural_name(), 'operation_suffix': self.operation_suffix, 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[instance_layout_A], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[instance_layout_B], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[instance_layout_C], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_functor': epilogue_functor, 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation] } return SubstituteTemplate(gemm_template, values) ################################################################################################### class EmitGemmUniversal3xInstance: ''' Responsible for emitting a CUTLASS 3.x template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [ "cutlass/cutlass.h", "cutlass/gemm/gemm.h", "cutlass/numeric_types.h", "cutlass/gemm/kernel/gemm_universal.hpp", 
"cutlass/gemm/collective/collective_builder.hpp", "cutlass/epilogue/collective/collective_builder.hpp", ] self.builtin_epilogue_functor_template = """ ${epilogue_functor}< ${element_d}, ${element_epilogue}, ${element_c}, ${element_epilogue} > """ self.gemm_template = """ using ${operation_name}_epilogue = typename cutlass::epilogue::collective::CollectiveBuilder< ${arch}, ${opcode_class_epi}, cute::Shape<cute::_${tile_shape_epi_m}, cute::_${tile_shape_epi_n}, cute::_${tile_shape_epi_k}>, cute::Shape<${cluster_shape_m}, ${cluster_shape_n}, ${cluster_shape_k}>, ${epi_tile_mn}, ${element_accumulator}, ${element_epilogue}, ${element_c}, ${layout_c}, ${align_c}, ${element_d}, ${layout_d}, ${align_d}, ${epilogue_schedule}, ${epilogue_functor} >::CollectiveOp; using ${operation_name}_mainloop = typename cutlass::gemm::collective::CollectiveBuilder< ${arch}, ${opcode_class_main}, ${element_a}, ${layout_a}, ${align_a}, ${element_b}, ${layout_b}, ${align_b}, ${element_accumulator}, cute::Shape<cute::_${tile_shape_main_m}, cute::_${tile_shape_main_n}, cute::_${tile_shape_main_k}>, cute::Shape<${cluster_shape_m}, ${cluster_shape_n}, ${cluster_shape_k}>, ${stages}, ${kernel_schedule} >::CollectiveOp; // Gemm operator ${operation_name} using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal< cute::Shape<int,int,int,int>, ${operation_name}_mainloop, ${operation_name}_epilogue, ${tile_scheduler}>; // Define named type struct ${operation_name} : public ${operation_name}_base { }; """ # def instance_template(self): return """ ${compile_guard_start} { using GemmKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>; manifest.append( new ${gemm_kind}<GemmKernel>("${operation_name}")); } ${compile_guard_end} """ # def emit(self, operation): _LOGGER.debug("*** EmitGemmConfigurationLibrary::emit(operation)") _LOGGER.debug("*** operation.procedural_name(): " + operation.procedural_name()) _LOGGER.debug("*** tile_shape: " + str(operation.tile_description.tile_shape)) _LOGGER.debug("*** warp_count: " + str(operation.tile_description.warp_count)) opcode_class_main = operation.tile_description.math_instruction.opcode_class opcode_class_epi = opcode_class_main tile_shape = operation.tile_description.tile_shape instruction_shape = operation.tile_description.math_instruction.instruction_shape cluster_m = operation.tile_description.cluster_shape[0] cluster_n = operation.tile_description.cluster_shape[1] tile_shape_main_m, tile_shape_main_n, tile_shape_main_k = tile_shape tile_shape_epi_m, tile_shape_epi_n, tile_shape_epi_k = tile_shape # account for static/dynamic cluster shapes cta_m = tile_shape[0] // cluster_m if cluster_m > 0 else tile_shape[0] cta_n = tile_shape[1] // cluster_n if cluster_n > 0 else tile_shape[1] # stage count set to zero indicates builder automatic stage selection if operation.tile_description.stages > 0: stage_count_string = f"cutlass::gemm::collective::StageCount<{str(operation.tile_description.stages)}>" else: stage_count_string = f"cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename {str(operation.procedural_name())}_epilogue::SharedStorage))>" epi_tile_mn = "cutlass::epilogue::collective::EpilogueTileAuto" instance_layout_A, instance_layout_B, instance_layout_C , instance_layout_D = \ (operation.A.layout, operation.B.layout, operation.C.layout, operation.D.layout) # 3.0 profiler integration only supports trivial epilogues for now epilogue_vector_length = 1 # Support built-in epilogue functors or user-defined functions if 
isinstance(operation.epilogue_functor, enum.Enum): values = { 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctor3xTag[operation.epilogue_functor], } epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) else: epilogue_functor = self.epilogue_functor.emit_declaration() # # Cutlass3x complex kernels' ElementA(B) is a tuple in collective mainloop builder, e.g. cute::tuple<Element, Transform>, Transform : cute::identity / cute::conjugate. element_a = DataTypeTag[operation.A.element] if not operation.is_complex() else f"cute::tuple<{str(DataTypeTag[operation.A.element])},{str(ComplexTransformTag3x[operation.A.complex_transform])}>" element_b = DataTypeTag[operation.B.element] if not operation.is_complex() else f"cute::tuple<{str(DataTypeTag[operation.B.element])},{str(ComplexTransformTag3x[operation.B.complex_transform])}>" epilogue_schedule_type = EpilogueScheduleTag[operation.epilogue_schedule] values = { 'operation_name': operation.procedural_name(), 'operation_suffix': self.operation_suffix, 'element_a': element_a, 'layout_a': LayoutTag[instance_layout_A], 'element_b': element_b, 'layout_b': LayoutTag[instance_layout_B], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[instance_layout_C], 'element_d': DataTypeTag[operation.D.element], 'layout_d': LayoutTag[instance_layout_D], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class_main': OpcodeClassTag[opcode_class_main], 'opcode_class_epi': OpcodeClassTag[opcode_class_epi], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'tile_shape_epi_m': str(tile_shape_epi_m), 'tile_shape_epi_n': str(tile_shape_epi_n), 'tile_shape_epi_k': str(tile_shape_epi_k), 'tile_shape_main_m': str(tile_shape_main_m), 'tile_shape_main_n': str(tile_shape_main_n), 'tile_shape_main_k': str(tile_shape_main_k), 'cluster_shape_m': 'cute::_' + str(operation.tile_description.cluster_shape[0]) if operation.tile_description.cluster_shape[0] > 0 else "int", 'cluster_shape_n': 'cute::_' + str(operation.tile_description.cluster_shape[1]) if operation.tile_description.cluster_shape[1] > 0 else "int", 'cluster_shape_k': 'cute::_' + str(operation.tile_description.cluster_shape[2]) if operation.tile_description.cluster_shape[2] > 0 else "int", 'instruction_shape_m': str(instruction_shape[0]), 'instruction_shape_n': str(instruction_shape[1]), 'instruction_shape_k': str(instruction_shape[2]), 'kernel_schedule' : str(KernelScheduleTag[operation.kernel_schedule]), 'epilogue_schedule' : str(epilogue_schedule_type), 'epi_tile_mn' : epi_tile_mn, 'epilogue_functor': epilogue_functor, 'stages': stage_count_string, 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'align_c': str(operation.C.alignment), 'align_d': str(operation.C.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'tile_scheduler': str(TileSchedulerTag[operation.tile_scheduler]), } return SubstituteTemplate(self.gemm_template, values) ################################################################################################### # class EmitGemmPlanarComplexInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = 
''): self.operation_suffix = operation_suffix self.includes = [] self.template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< ${element_a}, ${layout_a}, ${transform_a}, ${alignment_a}, ${element_b}, ${layout_b}, ${transform_b}, ${alignment_b}, ${element_c}, cutlass::layout::RowMajor, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, cutlass::epilogue::thread::LinearCombinationPlanarComplex< ${element_c}, ${alignment_c}, ${element_accumulator}, ${element_epilogue} >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, ${stages}, ${math_operator} >::GemmKernel; struct ${operation_name} : public Operation_${operation_name} { }; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}< cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> >("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] # exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major transposed_layout_A = TransposedLayout[operation.A.layout] transposed_layout_B = TransposedLayout[operation.B.layout] values = { 'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.B.element], 'layout_a': LayoutTag[transposed_layout_B], 'transform_a': ComplexTransformTag[operation.B.complex_transform], 'alignment_a': str(operation.B.alignment), 'element_b': DataTypeTag[operation.A.element], 'layout_b': LayoutTag[transposed_layout_A], 'transform_b': ComplexTransformTag[operation.A.complex_transform], 'alignment_b': str(operation.A.alignment), 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'alignment_c': str(operation.C.alignment), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'stages': str(operation.tile_description.stages), 'math_operator': 'cutlass::arch::OpMultiplyAdd' } return SubstituteTemplate(self.template, values) ################################################################################################### # class EmitGemmPlanarComplexArrayInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = 
operation_suffix self.includes = [] self.template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< ${element_a}, ${layout_a}, ${transform_a}, ${alignment_a}, ${element_b}, ${layout_b}, ${transform_b}, ${alignment_b}, ${element_c}, cutlass::layout::RowMajor, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, cutlass::epilogue::thread::LinearCombinationPlanarComplex< ${element_c}, ${alignment_c}, ${element_accumulator}, ${element_epilogue} >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, ${stages}, ${math_operator} >::GemmArrayKernel; struct ${operation_name} : public Operation_${operation_name} { }; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}< cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> >("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] # exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major transposed_layout_A = TransposedLayout[operation.A.layout] transposed_layout_B = TransposedLayout[operation.B.layout] values = { 'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.B.element], 'layout_a': LayoutTag[transposed_layout_B], 'transform_a': ComplexTransformTag[operation.B.complex_transform], 'alignment_a': str(operation.B.alignment), 'element_b': DataTypeTag[operation.A.element], 'layout_b': LayoutTag[transposed_layout_A], 'transform_b': ComplexTransformTag[operation.A.complex_transform], 'alignment_b': str(operation.A.alignment), 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'alignment_c': str(operation.C.alignment), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'stages': str(operation.tile_description.stages), 'math_operator': 'cutlass::arch::OpMultiplyAdd' } return SubstituteTemplate(self.template, values) ################################################################################################### # class EmitGemmGroupedInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [ 
"cutlass/cutlass.h", "cutlass/numeric_types.h", "cutlass/arch/arch.h", "cutlass/arch/mma.h", "cutlass/layout/matrix.h", "cutlass/gemm/device/gemm.h", "cutlass/gemm/kernel/gemm_grouped.h", "cutlass/gemm/kernel/default_gemm_grouped.h", "cutlass/gemm/device/gemm_grouped.h" ] self.builtin_epilogue_functor_template = """ ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} > """ self.gemm_template = """ // Gemm operator ${operation_name} using ${operation_name}_base = typename cutlass::gemm::kernel::DefaultGemmGrouped< ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}, ${swizzling_functor}, ${stages}, ${scheduler_mode}, ${math_operation} >::GemmKernel; // Define named type struct ${operation_name}${operation_suffix} : public ${operation_name}_base { }; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}< cutlass::gemm::device::GemmGrouped<${operation_name}> >("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): threadblock_shape = operation.tile_description.threadblock_shape warp_count = operation.tile_description.warp_count warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] transpose_layouts = { LayoutType.ColumnMajor: LayoutType.RowMajor, LayoutType.RowMajor: LayoutType.ColumnMajor } instance_layout_A, instance_layout_B, instance_layout_C = \ (operation.A.layout, operation.B.layout, operation.C.layout) # # Support built-in epilogue functors or user-defined functions if isinstance(operation.epilogue_functor, enum.Enum): epilogue_vector_length = \ min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element] values = { 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], } epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) else: epilogue_functor = self.epilogue_functor.emit_declaration() # values = { 'operation_name': operation.procedural_name(), 'operation_suffix': self.operation_suffix, 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[instance_layout_A], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[instance_layout_B], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[instance_layout_C], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': 
str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_functor': epilogue_functor, 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'scheduler_mode': GroupScheduleModeTag[operation.scheduler_mode], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation] } return SubstituteTemplate(self.gemm_template, values) ################################################################################################### # # Emitters functions for all targets # ################################################################################################### class EmitGemmConfigurationLibrary: def __init__(self, operation_path, configuration_name): self.configuration_name = configuration_name self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/') self.instance_emitter = { GemmKind.Gemm: EmitGemmInstance, GemmKind.Sparse: EmitSparseGemmInstance, GemmKind.Universal: EmitGemmUniversalInstance, GemmKind.Universal3x: EmitGemmUniversal3xInstance, GemmKind.PlanarComplex: EmitGemmPlanarComplexInstance, GemmKind.PlanarComplexArray: EmitGemmPlanarComplexArrayInstance, GemmKind.Grouped: EmitGemmGroupedInstance } self.gemm_kind_wrappers = { GemmKind.Gemm: 'GemmOperation', GemmKind.Sparse: 'GemmSparseOperation', GemmKind.Universal: 'GemmUniversalOperation', GemmKind.Universal3x: 'GemmUniversal3xOperation', GemmKind.PlanarComplex: 'GemmPlanarComplexOperation', GemmKind.PlanarComplexArray: 'GemmPlanarComplexArrayOperation', GemmKind.Grouped: 'GemmGroupedOperation' } self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)" self.separator = """ /////////////////////////////////////////////////////////////////////////////////////////////////// """ self.header_template = """ /* Generated by gemm_operation.py - Do not edit. 
*/ """ self.initialize_function_template = """ /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// void initialize_${configuration_name}(Manifest &manifest) { """ self.epilogue_template = """ } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// """ def __enter__(self): _LOGGER.debug("*** EmitGemmConfigurationLibrary::__enter__") _LOGGER.debug("*** configuration_path (file to write): " + str(self.configuration_path)) self.configuration_file = open(self.configuration_path, "w") self.configuration_file.write(self.header_template) self.configuration_file.write(self.separator) self.includes = collections.OrderedDict([ ("cutlass/cutlass.h", None), ("cutlass/library/library.h", None), ("cutlass/library/manifest.h", None), ("library_internal.h", None), ("gemm_operation.h", None), ("gemm_operation_3x.hpp", None), ("cutlass/arch/wmma.h", None), ("cutlass/numeric_types.h", None) ]) self.instance_definitions = [] self.instance_wrappers = [] self.operations = [] return self def emit(self, operation): _LOGGER.debug("*** EmitGemmConfigurationLibrary::emit(operation)") _LOGGER.debug("*** operation.gemm_kind: " + str(operation.gemm_kind)) emitter = self.instance_emitter[operation.gemm_kind]() for incl in emitter.includes: self.includes[incl] = None self.operations.append(operation) self.instance_definitions.append(emitter.emit(operation)) self.instance_wrappers.append(SubstituteTemplate(emitter.instance_template(), { 'configuration_name': self.configuration_name, 'operation_name': operation.procedural_name(), 'gemm_kind': self.gemm_kind_wrappers[operation.gemm_kind], 'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \ if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "", 'compile_guard_end': "#endif" \ if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "" })) def __exit__(self, exception_type, exception_value, traceback): # Write includes for incl, _ in self.includes.items(): include_statement = "#include \"%s\"\n" % incl self.configuration_file.write(include_statement) self.configuration_file.write(self.separator) # Write instance definitions in top-level namespace for instance_definition in self.instance_definitions: self.configuration_file.write(instance_definition) # Add wrapper objects within initialize() function self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, { 'configuration_name': self.configuration_name })) for instance_wrapper in self.instance_wrappers: self.configuration_file.write(instance_wrapper) self.configuration_file.write(self.epilogue_template) self.configuration_file.close() ################################################################################################### ###################################################################################################
cutlass/python/cutlass_library/gemm_operation.py
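The emitters in gemm_operation.py above expand their C++ kernel templates with SubstituteTemplate, which is defined elsewhere in cutlass_library. For reference, the sketch below is a minimal stand-in (deliberately named substitute_template so it is not mistaken for the library's implementation) showing the ${key} substitution behavior those emitters rely on; the fragment and values are illustrative only.

import re

def substitute_template(template: str, values: dict) -> str:
    """Minimal stand-in for cutlass_library's SubstituteTemplate:
    replaces every ${key} placeholder with its value from `values`;
    unknown keys are left untouched."""
    def lookup(match):
        key = match.group(1)
        return values.get(key, match.group(0))
    return re.sub(r"\$\{(\w+)\}", lookup, template)

# Expanding a tiny fragment of the planar-complex kernel template above.
fragment = "cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>"
print(substitute_template(fragment, {
    "threadblock_shape_m": "128",
    "threadblock_shape_n": "128",
    "threadblock_shape_k": "32",
}))
# -> cutlass::gemm::GemmShape<128, 128, 32>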
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-check" width="44" height="44" viewBox="0 0 24 24" stroke-width="2" stroke="#22863a" fill="none" stroke-linecap="round" stroke-linejoin="round"> <path stroke="none" d="M0 0h24v24H0z" fill="none"/> <path d="M5 12l5 5l10 -10" /> </svg>
cutlass/python/docs/_static/check-solid.svg
/* remove conflicting styling from Sphinx themes */ div.nbinput.container div.prompt *, div.nboutput.container div.prompt *, div.nbinput.container div.input_area pre, div.nboutput.container div.output_area pre, div.nbinput.container div.input_area .highlight, div.nboutput.container div.output_area .highlight { border: none; padding: 0; margin: 0; box-shadow: none; } div.nbinput.container > div[class*=highlight], div.nboutput.container > div[class*=highlight] { margin: 0; } div.nbinput.container div.prompt *, div.nboutput.container div.prompt * { background: none; } div.nboutput.container div.output_area .highlight, div.nboutput.container div.output_area pre { background: unset; } div.nboutput.container div.output_area div.highlight { color: unset; /* override Pygments text color */ } /* avoid gaps between output lines */ div.nboutput.container div[class*=highlight] pre { line-height: normal; } /* input/output containers */ div.nbinput.container, div.nboutput.container { display: -webkit-flex; display: flex; align-items: flex-start; margin: 0; width: 100%; } @media (max-width: 540px) { div.nbinput.container, div.nboutput.container { flex-direction: column; } } /* input container */ div.nbinput.container { padding-top: 5px; } /* last container */ div.nblast.container { padding-bottom: 5px; } /* input prompt */ div.nbinput.container div.prompt pre { color: #307FC1; } /* output prompt */ div.nboutput.container div.prompt pre { color: #BF5B3D; } /* all prompts */ div.nbinput.container div.prompt, div.nboutput.container div.prompt { width: 4.5ex; padding-top: 5px; position: relative; user-select: none; } div.nbinput.container div.prompt > div, div.nboutput.container div.prompt > div { position: absolute; right: 0; margin-right: 0.3ex; } @media (max-width: 540px) { div.nbinput.container div.prompt, div.nboutput.container div.prompt { width: unset; text-align: left; padding: 0.4em; } div.nboutput.container div.prompt.empty { padding: 0; } div.nbinput.container div.prompt > div, div.nboutput.container div.prompt > div { position: unset; } } /* disable scrollbars and line breaks on prompts */ div.nbinput.container div.prompt pre, div.nboutput.container div.prompt pre { overflow: hidden; white-space: pre; } /* input/output area */ div.nbinput.container div.input_area, div.nboutput.container div.output_area { -webkit-flex: 1; flex: 1; overflow: auto; } @media (max-width: 540px) { div.nbinput.container div.input_area, div.nboutput.container div.output_area { width: 100%; } } /* input area */ div.nbinput.container div.input_area { border: 1px solid #e0e0e0; border-radius: 2px; /*background: #f5f5f5;*/ } /* override MathJax center alignment in output cells */ div.nboutput.container div[class*=MathJax] { text-align: left !important; } /* override sphinx.ext.imgmath center alignment in output cells */ div.nboutput.container div.math p { text-align: left; } /* standard error */ div.nboutput.container div.output_area.stderr { background: #fdd; } /* ANSI colors */ .ansi-black-fg { color: #3E424D; } .ansi-black-bg { background-color: #3E424D; } .ansi-black-intense-fg { color: #282C36; } .ansi-black-intense-bg { background-color: #282C36; } .ansi-red-fg { color: #E75C58; } .ansi-red-bg { background-color: #E75C58; } .ansi-red-intense-fg { color: #B22B31; } .ansi-red-intense-bg { background-color: #B22B31; } .ansi-green-fg { color: #00A250; } .ansi-green-bg { background-color: #00A250; } .ansi-green-intense-fg { color: #007427; } .ansi-green-intense-bg { background-color: #007427; } .ansi-yellow-fg { color: 
#DDB62B; } .ansi-yellow-bg { background-color: #DDB62B; } .ansi-yellow-intense-fg { color: #B27D12; } .ansi-yellow-intense-bg { background-color: #B27D12; } .ansi-blue-fg { color: #208FFB; } .ansi-blue-bg { background-color: #208FFB; } .ansi-blue-intense-fg { color: #0065CA; } .ansi-blue-intense-bg { background-color: #0065CA; } .ansi-magenta-fg { color: #D160C4; } .ansi-magenta-bg { background-color: #D160C4; } .ansi-magenta-intense-fg { color: #A03196; } .ansi-magenta-intense-bg { background-color: #A03196; } .ansi-cyan-fg { color: #60C6C8; } .ansi-cyan-bg { background-color: #60C6C8; } .ansi-cyan-intense-fg { color: #258F8F; } .ansi-cyan-intense-bg { background-color: #258F8F; } .ansi-white-fg { color: #C5C1B4; } .ansi-white-bg { background-color: #C5C1B4; } .ansi-white-intense-fg { color: #A1A6B2; } .ansi-white-intense-bg { background-color: #A1A6B2; } .ansi-default-inverse-fg { color: #FFFFFF; } .ansi-default-inverse-bg { background-color: #000000; } .ansi-bold { font-weight: bold; } .ansi-underline { text-decoration: underline; } div.nbinput.container div.input_area div[class*=highlight] > pre, div.nboutput.container div.output_area div[class*=highlight] > pre, div.nboutput.container div.output_area div[class*=highlight].math, div.nboutput.container div.output_area.rendered_html, div.nboutput.container div.output_area > div.output_javascript, div.nboutput.container div.output_area:not(.rendered_html) > img{ padding: 5px; margin: 0; } /* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */ div.nbinput.container div.input_area > div[class^='highlight'], div.nboutput.container div.output_area > div[class^='highlight']{ overflow-y: hidden; } /* hide copybtn icon on prompts (needed for 'sphinx_copybutton') */ .prompt .copybtn { display: none; } /* Some additional styling taken form the Jupyter notebook CSS */ .jp-RenderedHTMLCommon table, div.rendered_html table { border: none; border-collapse: collapse; border-spacing: 0; color: black; font-size: 12px; table-layout: fixed; } .jp-RenderedHTMLCommon thead, div.rendered_html thead { border-bottom: 1px solid black; vertical-align: bottom; } .jp-RenderedHTMLCommon tr, .jp-RenderedHTMLCommon th, .jp-RenderedHTMLCommon td, div.rendered_html tr, div.rendered_html th, div.rendered_html td { text-align: right; vertical-align: middle; padding: 0.5em 0.5em; line-height: normal; white-space: normal; max-width: none; border: none; } .jp-RenderedHTMLCommon th, div.rendered_html th { font-weight: bold; } .jp-RenderedHTMLCommon tbody tr:nth-child(odd), div.rendered_html tbody tr:nth-child(odd) { background: #f5f5f5; } .jp-RenderedHTMLCommon tbody tr:hover, div.rendered_html tbody tr:hover { background: rgba(66, 165, 245, 0.2); }
cutlass/python/docs/_static/nbsphinx-code-cells.css
/* body[data-theme] { */ :root { --tabs--label-text: #4b5563; --tabs--label-text--hover: #4b5563; --tabs--label-text--active: #0ea5e9; --tabs--label-text--active--hover: #0ea5e9; --tabs--label-background: transparent; --tabs--label-background--hover: transparent; --tabs--label-background--active: transparent; --tabs--label-background--active--hover: transparent; --tabs--label-border: transparent; --tabs--label-border--hover: #d1d5db; --tabs--label-border--active: #0ea5e9; --tabs--label-border--active--hover: #0ea5e9; --tabs--padding-x: 1.25em; --tabs--margin-x: 0; --tabs--border: #e6e6e6; } /* Hide radio buttons */ .tab-set > input { position: absolute; opacity: 0; } /* Tab set container */ .tab-set { border-radius: 2px; display: flex; flex-wrap: wrap; margin: 0.75em 0; position: relative; } /* Tab label */ .tab-set > label { z-index: 1; width: auto; border-bottom: 2px solid var(--tabs--label-border); padding: 1em var(--tabs--padding-x) 0.5em; margin-left: var(--tabs--margin-x); color: var(--tabs--label-text); background: var(--tabs--label-background); transition: color 250ms; cursor: pointer; font-size: 0.875em; font-weight: 700; } .tab-set > label:nth-child(2) { margin-left: 0; } /* Hovered label */ .tab-set > label:hover { color: var(--tabs--label-text--hover); background: var(--tabs--label-background--hover); border-color: var(--tabs--label-border--hover); } /* Active tab label */ .tab-set > input:checked + label { color: var(--tabs--label-text--active); background: var(--tabs--label-background--active); border-color: var(--tabs--label-border--active); } .tab-set > input:checked + label:hover { color: var(--tabs--label-text--active--hover); background: var(--tabs--label-background--active--hover); border-color: var(--tabs--label-border--active--hover); } /* Tab content */ .tab-content { order: 99; display: none; width: 100%; box-shadow: 0 -0.0625rem var(--tabs--border); } /* Show content, when input is checked. */ .tab-set > input:checked + label + .tab-content { display: block; } .tab-content > p:first-child { margin-top: 0.75rem; } /* Remove the top border on first code block */ .tab-content > [class^="highlight-"]:first-child .highlight { border-top: none; border-top-left-radius: 0; border-top-right-radius: 0; } /* Remove margins on children */ .tab-content > *:first-child { margin-top: 0; } .tab-content > *:last-child { margin-bottom: 0; } /* Remove margins on nested tabs */ .tab-content > .tab-set { margin: 0; }
cutlass/python/docs/_static/tabs.css
CUTLASS
=======

Subpackages
-----------

.. toctree::
   :maxdepth: 1

   cutlass.emit
   cutlass.op
   cutlass.utils

Epilogue
--------

.. automodule:: cutlass.epilogue
   :members:
   :undoc-members:
   :show-inheritance:

Library Defaults
----------------

.. automodule:: cutlass.library_defaults
   :members:
   :undoc-members:
   :show-inheritance:

Swizzle
----------

.. automodule:: cutlass.swizzle
   :members:
   :undoc-members:
   :show-inheritance:
cutlass/python/docs_src/source/cutlass.rst
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Low-level functionality tests for GEMM with F16 operands on SM80 """ from functools import partial import logging import unittest import cutlass from cutlass.backend.utils.device import device_cc from utils import LayoutCombination, add_test_gemm cutlass.set_log_level(logging.WARNING) cc = 80 dtype = cutlass.DataType.f16 @unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.') @unittest.skipIf(cutlass.utils.datatypes.torch_type(dtype) is None, f'Version of torch installed does not contain a datatype match for {dtype}') class GemmF16Sm80(unittest.TestCase): """ Wrapper class to which tests will be added dynamically in __main__ """ pass @unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.') @unittest.skipIf(cutlass.utils.datatypes.torch_type(dtype) is None, f'Version of torch installed does not contain a datatype match for {dtype}') class GemmF16Sm80StreamK(unittest.TestCase): """ Wrapper class to which tests will be added dynamically in __main__ """ pass add_test_specialized = partial(add_test_gemm, element=dtype, cc=cc, cluster_shape=[1, 1, 1]) # Tests using TensorOp add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], 
warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NTT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TTT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 32], warp_count=[1, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 32], warp_count=[2, 1, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=5) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[2, 
2, 2], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) # Tests using SIMT add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 8], warp_count=[1, 2, 1], stages=2) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 8], warp_count=[2, 1, 1], stages=2) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.TTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 8], warp_count=[1, 1, 1], stages=2) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NNT, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2) # Stream K tests add_test_streamk = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK) add_test_streamk(cls=GemmF16Sm80StreamK, layouts=LayoutCombination.NNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_streamk(cls=GemmF16Sm80StreamK, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=5) if __name__ == '__main__': unittest.main()
cutlass/test/python/cutlass/gemm/gemm_f16_sm80.py
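The test file above drives every configuration through the add_test_gemm helper. For orientation only, the following sketch shows roughly how one TensorOp case (f16 operands, f32 accumulation, row-major layouts) could be expressed directly against the CUTLASS Python interface. The cutlass.op.Gemm keyword arguments, the settable opclass attribute, and plan.run are taken from the public interface but may differ between releases; the problem sizes and tensor names are arbitrary, and this is not the code path add_test_gemm itself uses.

import cutlass
import torch

# Hand-written analogue of one TensorOp case above: a sketch, not the
# exact mechanism exercised by add_test_gemm.
M, N, K = 512, 256, 128
A = torch.randn(M, K, dtype=torch.float16, device='cuda')
B = torch.randn(K, N, dtype=torch.float16, device='cuda')
C = torch.zeros(M, N, dtype=torch.float16, device='cuda')
D = torch.zeros_like(C)

plan = cutlass.op.Gemm(element=cutlass.DataType.f16,
                       layout=cutlass.LayoutType.RowMajor,
                       element_accumulator=cutlass.DataType.f32)
plan.opclass = cutlass.OpcodeClass.TensorOp  # assumed settable, mirroring the opclass parameterization above
plan.run(A, B, C, D)  # computes D = alpha * (A @ B) + beta * C with default alpha=1, beta=0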
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################

"""
Helper functions & classes for interface test
"""

class ExpectException:
    """
    Utility class to assert that an exception was raised when expected

    Example:

    .. highlight:: python
    .. code-block:: python

        with ExpectException(True, 'Division by zero'):
            x = 1.0 / 0.0

    :param exception_expected: whether an exception is expected to be raised
    :type exception_expected: bool
    :param message: message to print if an exception is raised when not expected or vice versa
    :type message: str
    :param verify_msg: whether to also check that the raised exception's message equals ``message``
    :type verify_msg: bool
    """
    def __init__(self, exception_expected: bool, message: str = '', verify_msg=False):
        self.exception_expected = exception_expected
        self.message = message
        self.verify_msg = verify_msg

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, traceback):
        exception_raised = exc_type is not None
        assert self.exception_expected == exception_raised, self.message

        if self.verify_msg:
            exc_message = f"{exc_type.__name__}: {exc_val}"
            assert exc_message == self.message, f"expect error message {self.message}, got {exc_message}"

        # Suppress the exception
        return True
cutlass/test/python/cutlass/interface/utils.py
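A brief usage sketch for ExpectException, mirroring its docstring: the first block passes because the expected exception is raised and suppressed, the second because no exception occurs. It assumes ExpectException has been imported from the test utilities above; the messages are arbitrary.

# Expect an exception: ZeroDivisionError is raised, so the context manager's
# assertion holds and the exception is suppressed.
with ExpectException(True, 'expected a division-by-zero failure'):
    x = 1.0 / 0.0

# Expect no exception: the body runs cleanly, so the assertion also holds.
with ExpectException(False, 'unexpected failure in plain arithmetic'):
    y = 1.0 / 2.0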
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <iostream> #include <iomanip> #include <utility> #include <type_traits> #include <vector> #include <numeric> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> using namespace cute; __global__ void test(double const* g_in, double* g_out) { extern __shared__ double smem[]; smem[threadIdx.x] = g_in[threadIdx.x]; __syncthreads(); g_out[threadIdx.x] = 2 * smem[threadIdx.x]; } __global__ void test2(double const* g_in, double* g_out) { using namespace cute; extern __shared__ double smem[]; auto s_tensor = make_tensor(make_smem_ptr(smem + threadIdx.x), Int<1>{}); auto g_tensor = make_tensor(make_gmem_ptr(g_in + threadIdx.x), Int<1>{}); copy(g_tensor, s_tensor); cp_async_fence(); cp_async_wait<0>(); __syncthreads(); g_out[threadIdx.x] = 2 * smem[threadIdx.x]; } TEST(SM80_CuTe_Ampere, CpAsync) { constexpr int count = 32; thrust::host_vector<double> h_in(count); for (int i = 0; i < count; ++i) { h_in[i] = double(i); } thrust::device_vector<double> d_in(h_in); thrust::device_vector<double> d_out(count, -1); test<<<1, count, sizeof(double) * count>>>( thrust::raw_pointer_cast(d_in.data()), thrust::raw_pointer_cast(d_out.data())); thrust::host_vector<double> h_result = d_out; thrust::device_vector<double> d_out_cp_async(count, -2); test2<<<1, count, sizeof(double) * count>>>( thrust::raw_pointer_cast(d_in.data()), thrust::raw_pointer_cast(d_out_cp_async.data())); thrust::host_vector<double> h_result_cp_async = d_out_cp_async; for (int i = 0; i < count; ++i) { EXPECT_EQ(h_result[i], h_result_cp_async[i]); } }
cutlass/test/unit/cute/ampere/cp_async.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <cutlass/trace.h> #include <iostream> #include <cute/tensor.hpp> using namespace cute; template <class Layout> void test_left_inverse(Layout const& layout) { auto inv_layout = left_inverse(layout); CUTLASS_TRACE_HOST(layout << " ^ -1\n" << " => \n" << inv_layout); for (int i = 0; i < size(layout); ++i) { //printf("%3d: %3d %3d\n", i, int(layout(i)), int(inv_layout(layout(i)))); EXPECT_EQ(inv_layout(layout(i)), i); } CUTLASS_TRACE_HOST("Composition: " << coalesce(composition(inv_layout, layout))); } TEST(CuTe_core, Inverse_left) { { auto layout = Layout<Shape <_1>, Stride<_0>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <Shape <_1,_1>>, Stride<Stride<_0,_0>>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_1>, Stride<_1>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_4>, Stride<_1>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_4>, Stride<_2>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_8, _4>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_8, _4>, Stride<_4, _1>>{}; test_left_inverse(filter(layout)); } { auto layout = Layout<Shape< _2,_4,_6>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_2,_4,_6>, Stride<_4,_1,_8>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_4, _2>, Stride<_1,_16>>{}; test_left_inverse(layout); } // // Swizzle left_inverse // { auto layout = ComposedLayout<Swizzle<1,0,2>, _0, Layout<Shape <_4, _4>, Stride<_1, _4>>>{}; test_left_inverse(layout); } { auto layout = ComposedLayout<Swizzle<1,0,2>, _0, Layout<Shape <_4, _4>, Stride<_4, _1>>>{}; test_left_inverse(layout); } { auto layout = ComposedLayout<Swizzle<1,0,1>, _0, Layout<Shape <_4, 
_4>, Stride<_8, _1>>>{}; test_left_inverse(layout); } // // Negative strides (beta support) // Post-conditions/layout indexing aren't generalized enough to support these yet // However, the composition post-condition is general enough. { auto layout = make_layout(Shape<_4>{}, Stride<Int<-1>>{}); test_left_inverse(layout); } //{ //auto layout = Layout<Shape < _2,_4>, // Stride<_m1,_2>>{}; //test_left_inverse(layout); //} //{ //auto layout = Layout<Shape < _2, _4>, // Stride< _4,_m1>>{}; //test_left_inverse(layout); //} //{ //auto layout = Layout<Shape < _2, _4, _6>, // Stride<_m1,_12,_m2>>{}; //test_left_inverse(layout); //} }
cutlass/test/unit/cute/core/inverse_left.cpp
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass_unit_test.h" #include <iostream> #include <cstdint> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> namespace cutlass::test { template <class ElementType, class SmemLayout> struct SharedStorage { cute::ArrayEngine<ElementType, cute::cosize_v<SmemLayout>> smem; alignas(16) cute::uint64_t tma_load_mbar[1]; }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED template <class T, class TiledCopy, class CTA_Tiler, class GmemLayout, class SmemLayout> __global__ void tma_test_device_cute(T const* g_in, T* g_out, CUTE_GRID_CONSTANT TiledCopy const tma, CTA_Tiler cta_tiler, GmemLayout gmem_layout, SmemLayout smem_layout) { using namespace cute; CUTE_STATIC_ASSERT_V(product_each(shape(cta_tiler)) == product_each(shape(smem_layout))); // Use Shared Storage structure to allocate and distribute aligned SMEM addresses extern __shared__ char shared_memory[]; using SharedStorage = SharedStorage<T, SmemLayout>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); // Construct SMEM tensor Tensor sA = make_tensor(make_smem_ptr(shared_storage.smem.begin()), smem_layout); // (CTA_TILE_M,CTA_TILE_N,...) // Shared memory barriers use 64bits in SMEM for synchronization uint64_t* tma_load_mbar = shared_storage.tma_load_mbar; // TMA requires special handling of strides to deal with coord codomain mapping // Represent the full tensors -- get these from TMA Tensor mA = tma.get_tma_tensor(shape(gmem_layout)); Tensor mB = make_tensor(make_gmem_ptr<T>(g_out), gmem_layout); constexpr int R = rank_v<CTA_Tiler>; Tensor gA = flat_divide(mA, cta_tiler); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...) 
Tensor gB = flat_divide(mB, cta_tiler); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...) // // Prepare the TMA_LOAD // auto cta_tma = tma.get_slice(Int<0>{}); // CTA slice Tensor tAgA_x = cta_tma.partition_S(gA); // (TMA,TMA_M,TMA_N,REST_M,REST_N) Tensor tAsA_x = cta_tma.partition_D(sA); // (TMA,TMA_M,TMA_N) #if 0 if (thread0()) { print(tma); print("TILE : "); print(cta_tiler); print("\n"); print(" mA : "); print( mA); print("\n"); print(" mB : "); print( mB); print("\n"); print(" gA : "); print( gA); print("\n"); print(" gB : "); print( gB); print("\n"); print(" sA : "); print( sA); print("\n"); print("tAgA_x: "); print(tAgA_x); print("\n"); print("tAsA_x: "); print(tAsA_x); print("\n"); } #endif // // Perform the TMA_LOAD // // INPUT: Group the REST_X modes and the TMA_X modes to easily iterate through the tiles Tensor tAgA = group_modes<1,rank(tAgA_x)>(tAgA_x); // (TMA,REST) Tensor tAsA = group_modes<1,rank(tAsA_x)>(tAsA_x); // (TMA,REST) static_assert(size<1>(tAsA) == 1); // OUTPUT: Group the CTA_TILE_X modes and REST_X modes for output Tensor tBgB = group_modes<0,R>(group_modes<R,rank(gB)>(gB)); // (CTA_TILE, REST) #if 0 if (thread0()) { print("tAgA : "); print(tAgA); print("\n"); print("tAsA : "); print(tAsA); print("\n"); print("tBgB : "); print(tBgB); print("\n"); } #endif // Loop over the TMA stages, using smem as our buffer for (int stage = 0; stage < size<1>(tAgA); ++stage) { // Set the bytes transferred in this TMA transaction (may involve multiple issues) constexpr int kTmaTransactionBytes = sizeof(ArrayEngine<T, size(sA)>); if (threadIdx.x == 0) { /// Initialize shared memory barrier tma_load_mbar[0] = 0; cute::initialize_barrier(tma_load_mbar[0], 1 /*numThreads*/); cute::set_barrier_transaction_bytes(tma_load_mbar[0], kTmaTransactionBytes); copy(tma.with(tma_load_mbar[0]), tAgA(_,stage), tAsA(_,0)); } __syncthreads(); /// Wait on the shared memory barrier until the phase bit flips from kPhaseBit value constexpr int kPhaseBit = 0; cute::wait_barrier(tma_load_mbar[0], kPhaseBit); // // Write out trivially smem -> gmem // // Subbyte elements could cause race conditions, so be even more conservative if (thread0()) { copy(sA, tBgB(_,stage)); } __syncthreads(); } } template <class T, class TmaType = T, class CopyOp, class GMEM_Layout, class SMEM_Layout, class CTA_Tile> auto test_tma_load(CopyOp const& copy_op, GMEM_Layout const& gmem_layout, SMEM_Layout const& smem_layout, CTA_Tile const& cta_tile) { using namespace cute; // Allocate and initialize host test data size_t N = ceil_div(cosize(gmem_layout) * sizeof_bits<T>::value, 8); thrust::host_vector<uint8_t> h_in(N); for (size_t i = 0; i < h_in.size(); ++i) { h_in[i] = uint8_t(i % 13); } Tensor hA_in = make_tensor(recast_ptr<T>(h_in.data()), gmem_layout); // Allocate and initialize device test data thrust::device_vector<uint8_t> d_in = h_in; thrust::device_vector<uint8_t> d_out(h_in.size(), uint8_t(-1)); // overflow uint // Create TMA for this device Tensor Tensor gA = make_tensor(make_gmem_ptr<T>(raw_pointer_cast(d_in.data())), gmem_layout); auto tma = make_tma_copy<TmaType>(copy_op, gA, smem_layout, cta_tile, Int<1>{}); //print(tma); // Launch int smem_size = int(sizeof(SharedStorage<T, decltype(smem_layout)>)); tma_test_device_cute<<<1, 128, smem_size>>>( reinterpret_cast<T const*>(raw_pointer_cast(d_in.data())), reinterpret_cast<T*> (raw_pointer_cast(d_out.data())), tma, cta_tile, gmem_layout, smem_layout); // Copy results back to host thrust::host_vector<uint8_t> h_out = d_out; Tensor hA_out = 
make_tensor(recast_ptr<T>(h_out.data()), gmem_layout); // Validate the results. Print only the first 3 errors. int count = 3; for (int i = 0; i < int(size(hA_out)) && count > 0; ++i) { EXPECT_EQ(hA_in(i), hA_out(i)); if (hA_in(i) != hA_out(i)) { --count; } } return tma; } #endif } // end namespace cutlass::test
cutlass/test/unit/cute/hopper/tma_load_testbed.hpp
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" // Tensor Op #include "cutlass/gemm/warp/default_mma_tensor_op.h" // Volta Tensor Op #include "cutlass/gemm/warp/mma_tensor_op_sm70.h" #include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h" // Simt #include "cutlass/gemm/warp/mma_simt.h" #include "cutlass/gemm/warp/mma_simt_policy.h" // Epilogue components #include "cutlass/epilogue/threadblock/default_epilogue_planar_complex.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "testbed_planar_complex.h" ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_f32_f32_tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor >::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaTensorOp, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_f16_f32_tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor >::Type; // // Output operator // using OutputOp = 
cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaTensorOp, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_f16_f16_tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor >::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaTensorOp, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_f32_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<32, 32, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using Element = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<Element>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<Element>::value>; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, Element, cutlass::layout::ColumnMajor, Element, cutlass::layout::RowMajor, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, Policy >; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< 
ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaTensorOp, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm70, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_simt_f32_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using Element = float; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using ElementOutput = Element; using ElementAccumulator = Element; using ElementCompute = Element; using WarpMmaSimt = cutlass::gemm::warp::MmaSimt< WarpShape, Element, LayoutA, Element, LayoutB, Element, LayoutC, cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<4, 8>, cutlass::layout::RowMajorInterleaved<2>, cutlass::gemm::GemmShape<4, 4, 1> > >; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaSimt, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_simt_f64_64x64_16x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = double; using ElementAccumulator = double; using ElementCompute = double; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using Element = double; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using ElementOutput = Element; using ElementAccumulator = Element; using ElementCompute = Element; using WarpMmaSimt = cutlass::gemm::warp::MmaSimt< WarpShape, Element, LayoutA, Element, LayoutB, Element, LayoutC, cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<4, 8>, cutlass::layout::RowMajorInterleaved<2>, cutlass::gemm::GemmShape<4, 4, 1> > >; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaSimt, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, kPartitionsK, 
    OutputOp,
    kElementsPerAccess
  >::Epilogue;

  //
  // Instantiate epilogue
  //

  EpiloguePlanarComplexTestbed<Epilogue> testbed;

  bool passed = testbed.run_all();

  EXPECT_TRUE(passed);
}

/////////////////////////////////////////////////////////////////////////////////////////////////
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide GEMM interface with elementwise tensor-tensor broadcast epilogue */ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "../../common/cutlass_unit_test.h" #include "testbed_utils.h" #include "gemm_testbed_3x.hpp" namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Gemm> struct Testbed3xTensorBroadcast { using TestBedImpl = typename detail::TestbedImpl<Gemm>; using Kernel = typename Gemm::GemmKernel; using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue; using ElementA = typename Kernel::ElementA; using StrideA = typename Kernel::StrideA; using ElementB = typename Kernel::ElementB; using StrideB = typename Kernel::StrideB; using ElementC = typename Kernel::ElementC; using StrideC = typename Kernel::StrideC; using ElementD = typename Kernel::ElementD; using StrideD = typename Kernel::StrideD; using ElementAccumulator = typename Kernel::ElementAccumulator; using ElementCompute = typename Epilogue::ElementCompute; using ElementScalar = typename Epilogue::ElementScalar; using ProblemShapeType = typename Kernel::ProblemShape; using ElementBias = typename Epilogue::ElementBias; using ActivationFunctor = typename Epilogue::ActivationFunctor; static constexpr bool IsBinaryOp0Enabled = Epilogue::IsBinaryOp0Enabled; static constexpr bool IsBinaryOp1Enabled = Epilogue::IsBinaryOp1Enabled; static constexpr bool IsUnaryOpEnabled = Epilogue::IsUnaryOpEnabled; static constexpr bool PerColBias = Epilogue::PerColumnBias; using LayoutTagA = typename TestBedImpl::LayoutTagA; using LayoutTagB = typename TestBedImpl::LayoutTagB; using LayoutTagC = typename TestBedImpl::LayoutTagC; using LayoutTagD = typename TestBedImpl::LayoutTagD; using LayoutTagVector = cutlass::layout::PackedVectorLayout; cutlass::HostTensor<ElementBias, LayoutTagVector> bias; cutlass::HostTensor<ElementC, LayoutTagC> tensor_C1; // tensor_C0 is taken from TestbedImpl's tensor_C // Detail Implementation TestBedImpl impl_; // // Methods // Testbed3xTensorBroadcast( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = TestBedImpl::kDefaultSeed ) : impl_(CheckEquality::EXACT, ScalarLoc::ON_DEVICE, VectorBeta::ENABLED, init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_) { } Testbed3xTensorBroadcast( typename LayoutTagA::Stride stride_factor_A_, typename LayoutTagB::Stride stride_factor_B_, typename LayoutTagC::Stride stride_factor_C_, typename LayoutTagD::Stride stride_factor_D_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = TestBedImpl::kDefaultSeed ) : impl_(stride_factor_A_, stride_factor_B_, stride_factor_C_, stride_factor_D_, CheckEquality::EXACT, ScalarLoc::ON_HOST, VectorBeta::ENABLED, init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_) { } /// Initializes data structures void initialize(ProblemShapeType problem_size) { // // Allocate the GEMM workspace for A/B/C/D tensor // impl_.initialize(problem_size); } void initialize_bias(ProblemShapeType problem_size) { auto problem_shape_MNKL = 
cute::append<4>(problem_size, 1); auto bias_size = PerColBias ? cute::get<1>(problem_shape_MNKL) : cute::get<0>(problem_shape_MNKL); bias.resize(cutlass::Coord<1>(bias_size)); EXPECT_TRUE(detail::initialize_tensor(bias.host_view(), cutlass::Distribution::Uniform, impl_.collective_mma_inputs.seed + 2023)); bias.sync_device(); } void initialize_c1(ProblemShapeType problem_size) { auto problem_shape_MNKL = cute::append<4>(problem_size, 1); auto M = cute::get<0>(problem_shape_MNKL); auto N = cute::get<1>(problem_shape_MNKL); auto L = cute::get<3>(problem_shape_MNKL); auto c_coord = cutlass::make_Coord(M * L, N); tensor_C1.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, impl_.collective_epilogue.stride_factor_C)); EXPECT_TRUE(detail::initialize_tensor(tensor_C1.host_view(), cutlass::Distribution::Uniform, impl_.collective_mma_inputs.seed + 2024)); tensor_C1.sync_device(); } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference( cute::Shape<int,int,int,int> problem_shape_MNKL, ElementScalar alpha, ElementScalar beta, bool use_bias) { auto [M, N, K, L] = problem_shape_MNKL; impl_.collective_epilogue.tensor_D.sync_host(); EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.collective_mma_inputs.tensor_A.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.collective_mma_inputs.tensor_B.host_view()), 0); if (impl_.collective_epilogue.tensor_D.size() > 1) { EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.collective_epilogue.tensor_D.host_view()), 0); } if (impl_.collective_epilogue.reference_D.size() > 1) { EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.collective_epilogue.reference_D.host_view()), 0); } bool passed = cutlass::reference::host::TensorEquals(impl_.collective_epilogue.reference_D.host_view(), impl_.collective_epilogue.tensor_D.host_view()); EXPECT_TRUE(passed); if (!passed) { std::stringstream fname; fname << "error_Gemm_device_broadcast" << M << "x" << N << "x" << K << "x" << L << "_" << cute::get<0>(typename Gemm::GemmKernel::TileShape{}) << "_" << cute::get<1>(typename Gemm::GemmKernel::TileShape{}) << "_" << cute::get<2>(typename Gemm::GemmKernel::TileShape{}) << ".txt"; std::ofstream file(fname.str()); file << "problem: " << ' ' << M << "x" << N << "x" << K << ", Batch count = " << L << ", alpha: " << float(alpha) << ", beta: " << float(beta) << ", use_bias: " << use_bias << ", per-col bias: " << PerColBias << "\n\n"; if (use_bias){ file << "Bias = \n" << bias.host_view()<< "\n\n"; } file << "A =\n" << impl_.collective_mma_inputs.tensor_A.host_view() << "\nB =\n" << impl_.collective_mma_inputs.tensor_B.host_view() << "\nC0 =\n" << impl_.collective_epilogue.tensor_C.host_view() << "\nC1 =\n" << tensor_C1.host_view() << "\n\nReference =\n" << impl_.collective_epilogue.reference_D.host_view() << "\n\nComputed =\n" <<impl_.collective_epilogue.tensor_D.host_view(); } return passed; } /// Verifies the result matches the GEMM with elementwise tensor-tensor /// broadcast operation bool verify( ProblemShapeType problem_size, ElementScalar alpha, ElementScalar beta, bool use_bias) { auto problem_shape_MNKL = cute::append<4>(problem_size, 1); auto M = cute::get<0>(problem_shape_MNKL); auto N = cute::get<1>(problem_shape_MNKL); auto K = cute::get<2>(problem_shape_MNKL); auto L = cute::get<3>(problem_shape_MNKL); auto A = cute::make_tensor(impl_.collective_mma_inputs.tensor_A.host_data(), cute::make_layout(cute::make_shape(M, K, L), 
impl_.collective_mma_inputs.stride_a)); auto B = cute::make_tensor(impl_.collective_mma_inputs.tensor_B.host_data(), cute::make_layout(cute::make_shape(N, K, L), impl_.collective_mma_inputs.stride_b)); auto D = cute::make_tensor(impl_.collective_epilogue.reference_D.host_data(), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_d)); auto Bias = cute::make_tensor(static_cast<ElementBias*>(use_bias ? bias.host_data() : nullptr), cute::make_layout(PerColBias ? cute::make_shape(1, N) : cute::make_shape(M, 1))); auto C0 = cute::make_tensor(impl_.collective_epilogue.tensor_C.host_data(), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_c)); auto C1 = cute::make_tensor(tensor_C1.host_data(), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_c)); // Create host workspace for output of testbed. This computes a portion of the epilogue: // ref_compute_out = Activation(alpha * (A @ B) + bias) cutlass::HostTensor<ElementCompute, LayoutTagC> ref_compute_out; auto c_coord = cutlass::make_Coord(M * L, N); ref_compute_out.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, impl_.collective_epilogue.stride_factor_C), false); auto RefComputeOut = cute::make_tensor(ref_compute_out.host_data(), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_c)); cutlass::reference::host::GettMainloopParams<ElementAccumulator, decltype(A), decltype(B)> mainloop_params{A, B}; // Use a dummy null tensor for operand C because the epilogue overrides C. auto dummy_C = cute::make_tensor(static_cast<ElementC*>(nullptr), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_c)); ElementCompute dummy_beta(0); auto dummy_Aux = cute::make_tensor(static_cast<ElementD*>(nullptr), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_d)); auto dummy_Valpha = cute::make_tensor(static_cast<ElementCompute*>(nullptr), cute::make_layout(cute::make_shape(M, 1))); auto dummy_Vbeta = cute::make_tensor(static_cast<ElementCompute*>(nullptr), cute::make_layout(cute::make_shape(M, 1))); cutlass::reference::host::GettEpilogueParams< ElementScalar, ElementScalar, ElementAccumulator, ElementCompute, decltype(dummy_C), decltype(RefComputeOut), decltype(Bias), decltype(dummy_Aux), decltype(dummy_Valpha), decltype(dummy_Vbeta), ActivationFunctor, cutlass::plus<ElementCompute>, PerColBias> epilogue_params{ alpha, dummy_beta, dummy_C, RefComputeOut, Bias, dummy_Aux, dummy_Valpha, dummy_Vbeta }; cutlass::reference::host::Gemm3x(mainloop_params, epilogue_params); cutlass::NumericConverter<ElementCompute, ElementC, Epilogue::ThreadEpilogueOp::kRound> source_converter; cutlass::NumericConverter<ElementD, ElementCompute, Epilogue::ThreadEpilogueOp::kRound> destination_converter; cutlass::multiplies<ElementCompute> mul; // Compute broadcast operations atop the reference #pragma omp parallel for collapse(3) for (int64_t l = 0; l < cute::size<2>(A.layout()); ++l) { for (int64_t m = 0; m < cute::size<0>(A.layout()); ++m) { for (int64_t n = 0; n < cute::size<0>(B.layout()); ++n) { ElementCompute intermediate = RefComputeOut(m, n, l); // Apply BinaryOp0, if needed if constexpr (IsBinaryOp0Enabled) { typename Epilogue::ThreadEpilogueOp::BinaryOp0 bin0; ElementCompute converted_source = source_converter(C0(m, n, l)); intermediate = bin0(intermediate, mul(beta, converted_source)); } // Apply BinaryOp1, if needed if constexpr (IsBinaryOp1Enabled) { typename 
Epilogue::ThreadEpilogueOp::BinaryOp1 bin1; ElementCompute converted_source = source_converter(C1(m, n, l)); intermediate = bin1(intermediate, mul(beta, converted_source)); } // Apply UnaryOp, if needed if constexpr (IsUnaryOpEnabled) { typename Epilogue::ThreadEpilogueOp::UnaryOp unary; intermediate = unary(intermediate); } D(m, n, l) = destination_converter(intermediate); } } } return compare_reference(problem_shape_MNKL, alpha, beta, use_bias); } /// Executes one test bool run( ProblemShapeType problem_size, ElementScalar alpha = ElementScalar(1), ElementScalar beta = ElementScalar(0), bool profiling = false, int iterations = 20, bool use_bias = true) { // Fail test if insufficient CUDA device if (!impl_.sufficient()) { std::cout << "Test failed due to insufficient CUDA device." << std::endl; return false; } // // Initialize the GEMM operator // typename Gemm::Arguments arguments; cutlass::KernelHardwareInfo hw_info; hw_info.device_id = 0; if (not profiling) { impl_.sm_count = std::min(impl_.MaxSmCount, cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id)); hw_info.sm_count = impl_.sm_count; } else { impl_.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id); hw_info.sm_count = impl_.sm_count; } /// Initializes data structures /// A/B/C0/D Tensor initialize(problem_size); initialize_bias(problem_size); if constexpr (IsBinaryOp1Enabled) { initialize_c1(problem_size); } arguments = typename Gemm::Arguments{ cutlass::gemm::GemmUniversalMode::kGemm, problem_size, { impl_.collective_mma_inputs.tensor_A.device_data(), impl_.collective_mma_inputs.stride_a, impl_.collective_mma_inputs.tensor_B.device_data(), impl_.collective_mma_inputs.stride_b, impl_.mma_promotion_interval }, { // Epilogue arguments { alpha, beta }, // ThreadOp arguments impl_.collective_epilogue.stride_c, impl_.collective_epilogue.tensor_D.device_data(), impl_.collective_epilogue.stride_d, use_bias ? 
bias.device_data() : nullptr, impl_.collective_epilogue.tensor_C.device_data(), tensor_C1.device_data() }, // Epilogue arguments end hw_info }; Gemm gemm_op; size_t workspace_size = Gemm::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = gemm_op.can_implement(arguments); if (status != cutlass::Status::kSuccess) { cudaError_t error = cudaGetLastError(); std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n"; return true; } // // Run the GEMM // if (profiling) { return impl_.profile(problem_size, iterations, gemm_op, arguments, workspace); } else { cudaError_t result; status = gemm_op.initialize(arguments, workspace.get()); status = gemm_op.run(); result = cudaDeviceSynchronize(); if (result != cudaSuccess) { EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync."; return false; } EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Verify // bool passed = this->verify(problem_size, alpha, beta, use_bias); if (!passed) { std::cout << "Error : Failed : with alpha: " << float(alpha) << ", beta: " << float(beta) << ", use_bias: " << use_bias << "\n"; } return passed; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Gemm> bool TestAllTensorBroadcast(bool use_bias=true) { using ElementScalar = typename Gemm::GemmKernel::CollectiveEpilogue::ElementScalar; using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape; int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB); std::vector<int> problem_size_m = {max_alignment, 512 - 3 * max_alignment}; std::vector<int> problem_size_n = {max_alignment, 512 - 2 * max_alignment}; if constexpr (cute::is_same_v<typename Gemm::GemmKernel::DispatchPolicy::Schedule, cutlass::gemm::KernelTmaWarpSpecializedPingpong>) { problem_size_m.push_back(768); problem_size_n.push_back(768); } constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages; constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{}); std::vector<int> problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment}; Testbed3xTensorBroadcast<Gemm> testbed; bool passed = true; for (int m : problem_size_m) { for (int n : problem_size_n) { for (int k : problem_size_k) { ProblemShapeType problem_size; if constexpr (cute::rank(ProblemShapeType{}) == 4) { problem_size = ProblemShapeType{m, n, k, /* l */ 1}; } else { problem_size = ProblemShapeType{m, n, k}; } for (bool use_bias : {true, false}) { passed = testbed.run( problem_size, cutlass::from_real<ElementScalar>(1), cutlass::from_real<ElementScalar>(1), false, // profiling 20, // iterations use_bias ); if (!passed) { return false; } } } } } if constexpr (cute::rank(ProblemShapeType{}) == 4) { auto problem_size = ProblemShapeType{256 + max_alignment, 256 + max_alignment, 160 + max_alignment, /* l */ 3}; passed = testbed.run( problem_size, cutlass::from_real<ElementScalar>(1), cutlass::from_real<ElementScalar>(1), false, // profiling 20 // iterations ); if (!passed) { return false; } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace test /////////////////////////////////////////////////////////////////////////////////////////////////
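// Note on the reference computation in the testbed above: Testbed3xTensorBroadcast::verify
// first forms ref_compute_out = ActivationFunctor(alpha * (A @ B) + bias) via the GETT host
// reference, then folds in the broadcast operands, so the value compared against the device
// result is
//
//   D = UnaryOp(BinaryOp1(BinaryOp0(ActivationFunctor(alpha * (A @ B) + bias),
//                                   beta * C0),
//                         beta * C1))
//
// where each BinaryOp/UnaryOp stage is applied only if the collective epilogue enables it
// (IsBinaryOp0Enabled / IsBinaryOp1Enabled / IsUnaryOpEnabled).
//
// Illustrative sketch only (not part of the header above): a test translation unit that
// includes it would typically drive the sweep as below. `Gemm` is a placeholder for a
// concrete CUTLASS 3.x kernel type (e.g. composed with the collective builders and wrapped
// in cutlass::gemm::device::GemmUniversalAdapter); it is not defined here.
//
// TEST(ExampleDeviceGemmTensorBroadcast, RandomSizes) {
//   bool passed = test::gemm::device::TestAllTensorBroadcast<Gemm>();
//   EXPECT_TRUE(passed);
// }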
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for grouped Rank2K interface */ #pragma once #include <fstream> #include <iostream> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/rank_2k_grouped.h" #include "cutlass/gemm/kernel/default_rank_2k_grouped.h" #include "cutlass/gemm/device/rank_2k_grouped.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/rank_2k_complex.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/tensor_view_io.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> struct TestbedGrouped { // // Type definitions // using ElementA = typename Rank2K::ElementA; using ElementB = typename Rank2K::ElementB; using ElementC = typename Rank2K::ElementC; using ElementAccumulator = typename Rank2K::ElementAccumulator; using EpilogueOutputOp = typename Rank2K::EpilogueOutputOp; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using LayoutA = typename Rank2K::LayoutA; using LayoutB = typename Rank2K::LayoutB; using LayoutC = typename Rank2K::LayoutC; using MatrixCoord = typename LayoutC::TensorCoord; // // Data members // /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint32_t seed; int problem_count; std::vector<cutlass::gemm::GemmCoord> problem_sizes_host; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device; std::vector<int64_t> offset_A; std::vector<int64_t> offset_B; std::vector<int64_t> offset_C; std::vector<int64_t> offset_D; std::vector<int64_t> lda_host; std::vector<int64_t> ldb_host; std::vector<int64_t> ldc_host; std::vector<int64_t> ldd_host; cutlass::DeviceAllocation<int64_t> lda; cutlass::DeviceAllocation<int64_t> ldb; cutlass::DeviceAllocation<int64_t> ldc; cutlass::DeviceAllocation<int64_t> ldd; cutlass::DeviceAllocation<ElementA> block_A; cutlass::DeviceAllocation<ElementB> block_B; cutlass::DeviceAllocation<ElementC> block_C; cutlass::DeviceAllocation<ElementC> block_D; cutlass::DeviceAllocation<ElementA *> ptr_A; cutlass::DeviceAllocation<ElementB *> ptr_B; cutlass::DeviceAllocation<ElementC *> ptr_C; cutlass::DeviceAllocation<ElementC *> ptr_D; // // Methods // TestbedGrouped( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Rank2K::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { 
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { // no fill - remain zero } return true; } /// Initializes data structures void initialize() { // // Choose random problem sizes // // construct a few problems of random sizes srand(seed); int64_t total_elements_A = 0; int64_t total_elements_B = 0; int64_t total_elements_C = 0; int64_t total_elements_D = 0; lda_host.resize(problem_count); ldb_host.resize(problem_count); ldc_host.resize(problem_count); ldd_host.resize(problem_count); problem_sizes_host.clear(); problem_sizes_host.resize(problem_count); for (int32_t i = 0; i < problem_count; ++i) { auto N = 8 * (rand() % 64) + 24; auto K = 8 * (rand() % 64) + 24; cutlass::gemm::GemmCoord problem(N, N, K); if (!i) { problem = cutlass::gemm::GemmCoord(16, 16, 8); } problem_sizes_host.at(i) = problem; lda_host.at(i) = LayoutA::packed({problem.n(), problem.k()}).stride(0); ldb_host.at(i) = LayoutB::packed({problem.n(), problem.k()}).stride(0); ldc_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0); ldd_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0); offset_A.push_back(total_elements_A); offset_B.push_back(total_elements_B); offset_C.push_back(total_elements_C); offset_D.push_back(total_elements_D); int64_t elements_A = problem.n() * problem.k(); int64_t elements_B = problem.n() * problem.k(); int64_t elements_C = problem.n() * problem.n(); int64_t elements_D = problem.n() * problem.n(); total_elements_A += elements_A; total_elements_B += elements_B; total_elements_C += elements_C; total_elements_D += elements_D; // Random strides between problems? 
} problem_sizes_device.reset(problem_count); problem_sizes_device.copy_from_host(problem_sizes_host.data()); lda.reset(problem_count); ldb.reset(problem_count); ldc.reset(problem_count); ldd.reset(problem_count); lda.copy_from_host(lda_host.data()); ldb.copy_from_host(ldb_host.data()); ldc.copy_from_host(ldc_host.data()); ldd.copy_from_host(ldd_host.data()); // // Assign pointers // block_A.reset(total_elements_A); block_B.reset(total_elements_B); block_C.reset(total_elements_C); block_D.reset(total_elements_D); std::vector<ElementA *> ptr_A_host(problem_count); std::vector<ElementB *> ptr_B_host(problem_count); std::vector<ElementC *> ptr_C_host(problem_count); std::vector<ElementC *> ptr_D_host(problem_count); for (int32_t i = 0; i < problem_count; ++i) { ptr_A_host.at(i) = block_A.get() + offset_A.at(i); ptr_B_host.at(i) = block_B.get() + offset_B.at(i); ptr_C_host.at(i) = block_C.get() + offset_C.at(i); ptr_D_host.at(i) = block_D.get() + offset_D.at(i); } ptr_A.reset(problem_count); ptr_A.copy_from_host(ptr_A_host.data()); ptr_B.reset(problem_count); ptr_B.copy_from_host(ptr_B_host.data()); ptr_C.reset(problem_count); ptr_C.copy_from_host(ptr_C_host.data()); ptr_D.reset(problem_count); ptr_D.copy_from_host(ptr_D_host.data()); // // Initialize the problems of the workspace // for (int32_t i = 0; i < problem_count; ++i) { cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i); LayoutA layout_A(lda_host.at(i)); LayoutB layout_B(ldb_host.at(i)); LayoutC layout_C(ldc_host.at(i)); LayoutC layout_D(ldd_host.at(i)); MatrixCoord extent_A{problem.n(), problem.k()}; MatrixCoord extent_B{problem.n(), problem.k()}; MatrixCoord extent_C{problem.n(), problem.n()}; std::vector<ElementA> matrix_A(layout_A.capacity(extent_A)); std::vector<ElementB> matrix_B(layout_B.capacity(extent_B)); std::vector<ElementC> matrix_C(layout_C.capacity(extent_C)); std::vector<ElementC> matrix_D(layout_D.capacity(extent_C)); initialize_tensor(cutlass::TensorView<ElementA, LayoutA>(matrix_A.data(), layout_A, extent_A), init_A, seed * 2021); initialize_tensor(cutlass::TensorView<ElementB, LayoutB>(matrix_B.data(), layout_B, extent_B), init_B, seed * 2022); initialize_tensor(cutlass::TensorView<ElementC, LayoutC>(matrix_C.data(), layout_C, extent_C), init_C, seed * 2023); cutlass::device_memory::copy_to_device(ptr_A_host.at(i), matrix_A.data(), matrix_A.size()); cutlass::device_memory::copy_to_device(ptr_B_host.at(i), matrix_B.data(), matrix_B.size()); cutlass::device_memory::copy_to_device(ptr_C_host.at(i), matrix_C.data(), matrix_C.size()); cutlass::device_memory::copy_to_device(ptr_D_host.at(i), matrix_D.data(), matrix_D.size()); } } /// Verifies the result is a Rank2K bool verify( ElementCompute alpha, ElementCompute beta) { bool passed = true; for (int32_t i = 0; i < problem_count; ++i) { cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i); LayoutA layout_A(lda_host.at(i)); LayoutB layout_B(ldb_host.at(i)); LayoutC layout_C(ldc_host.at(i)); LayoutC layout_D(ldd_host.at(i)); MatrixCoord extent_A{problem.n(), problem.k()}; MatrixCoord extent_B{problem.n(), problem.k()}; MatrixCoord extent_C{problem.n(), problem.n()}; std::vector<ElementA> matrix_A(layout_A.capacity(extent_A)); std::vector<ElementB> matrix_B(layout_B.capacity(extent_B)); std::vector<ElementC> matrix_C(layout_C.capacity(extent_C)); std::vector<ElementC> matrix_D(layout_D.capacity(extent_C)); std::vector<ElementC> matrix_Ref(layout_D.capacity(extent_C)); cutlass::device_memory::copy_to_host(matrix_A.data(), block_A.get() + offset_A.at(i), 
matrix_A.size()); cutlass::device_memory::copy_to_host(matrix_B.data(), block_B.get() + offset_B.at(i), matrix_B.size()); cutlass::device_memory::copy_to_host(matrix_C.data(), block_C.get() + offset_C.at(i), matrix_C.size()); cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size()); cutlass::TensorView<ElementA, LayoutA> view_A(matrix_A.data(), layout_A, extent_A); cutlass::TensorView<ElementB, LayoutB> view_B(matrix_B.data(), layout_B, extent_B); cutlass::TensorView<ElementC, LayoutC> view_C(matrix_C.data(), layout_C, extent_C); cutlass::TensorView<ElementC, LayoutC> view_D(matrix_D.data(), layout_D, extent_C); cutlass::TensorView<ElementC, LayoutC> view_Ref(matrix_Ref.data(), layout_D, extent_C); // Reference Rank2K cutlass::reference::host::Rank2KComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator >( problem, alpha, view_A, Rank2K::kTransformA, view_B, Rank2K::kTransformB, beta, view_C, view_Ref, ElementAccumulator(0), Rank2K::kFillModeC, Rank2K::kBlasMode ); // Ensure that no input or output is entirely zero EXPECT_GT(cutlass::reference::host::TensorNorm(view_A), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(view_B), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(view_C), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(view_D), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(view_Ref), 0); // Compare against reference passed = cutlass::reference::host::TensorEquals(view_D, view_Ref); if (!passed) { std::ofstream file("testbed_grouped_errors.txt"); file << "problem: " << problem << " [group: " << i << "]\n" << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; file << "A =\n" << view_A << "\nB =\n" << view_B << "\nC =\n" << view_C << "\n\nReference =\n" << view_Ref << "\nComputed =\n" << view_D; return passed; } } return passed; } /// Executes one test bool run( int problem_count, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { this->problem_count = problem_count; // Initialize the problem initialize(); int threadblock_count = Rank2K::sufficient(problem_sizes_host.data(), problem_count); // Early exit if (!threadblock_count) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device resources." 
<< std::endl; } return true; } // Configure the Rank2K arguments typename EpilogueOutputOp::Params epilogue_op(alpha, beta); // Configure Rank2K arguments typename Rank2K::Arguments args( cutlass::gemm::GemmUniversalMode::kGemm, problem_sizes_device.get(), problem_count, threadblock_count, epilogue_op, ptr_A.get(), ptr_B.get(), ptr_C.get(), ptr_D.get(), lda.get(), ldb.get(), ldc.get(), ldd.get(), problem_sizes_host.data() ); // Initialize the Rank2K object Rank2K rank2k; size_t workspace_size = rank2k.get_workspace_size(args); cutlass::DeviceAllocation<uint8_t> workspace(workspace_size); cutlass::Status status = rank2k.initialize(args, workspace.get()); if (status != cutlass::Status::kSuccess) { return false; } // Run the Rank2K object status = rank2k.run(); if (status != cutlass::Status::kSuccess) { return false; } // Wait for completion cudaError_t result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << "Kernel execution error: " << cudaGetErrorString(result); if (result != cudaSuccess) { return false; } // Verify correctness return verify(alpha, beta); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // device } // gemm } // test /////////////////////////////////////////////////////////////////////////////////////////////////
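// Illustrative sketch only (not part of the testbed header above): test translation units
// that include it instantiate a device-level grouped Rank2K operation and drive the testbed
// as below. `Rank2KGrouped` stands in for a concrete cutlass::gemm::device::Rank2KGrouped<...>
// instantiation supplied by the including test; it is not defined here.
//
// TEST(ExampleDeviceRank2KGrouped, 27_problems) {
//   test::gemm::device::TestbedGrouped<Rank2KGrouped> testbed;
//   // run() picks random per-group N/K sizes, initializes the operands, launches the
//   // grouped kernel, and verifies every group against the host Rank2KComplex reference.
//   EXPECT_TRUE(testbed.run(27));
// }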
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for warp-level wmma gemm */ #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED) #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/gemm/warp/default_mma_wmma_tensor_op.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" #include "testbed.h" /// Test name format: SM[arch]_warp_wmma_[alayout]_[blayout]_[clayout]_[dtype].[threadblock_shape]_[warp_shape] //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// f16 accumulation point wmma.mma ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////// [START] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [START] ////////////////////// //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// // 4 tests for {N,T}x{N,T}=>{T} TEST(SM70_warp_wmma_row_col_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_row_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using 
WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_col_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } // 4 tests for {N,T}x{N,T}=>{N} TEST(SM70_warp_wmma_row_col_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// 
wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_row_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_col_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } /////////// [END] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [END] /////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 64x64x16_64x64x16_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_64x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_64x32x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = 
cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_32x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_32x32x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 128x128x16_64x64x16_16x16x16) { // Even though the test launches 128x128x16 CTA tile this test only verfies one warp // , i.e., warp_0 of size 64x64x16 out of the four warps required to cover the CTA using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<128, 128, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m32n8k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, 
ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m8n32k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 8x32x16_8x32x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m8n32k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_row_f16, 8x32x16_8x32x16_8x32x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } TEST(SM70_warp_wmma_col_row_row_f16, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// f32 accumulation point wmma.mma ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m16n16k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most 
basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 64x64x16_64x64x16_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 64x64x32_64x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 128x128x16_64x64x16_16x16x16) { // Even though the test launches 128x128x16 CTA tile this test only verfies one warp // , i.e., warp_0 of size 64x64x16 out of the four warps required to cover the CTA using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<128, 128, 16> >().run(); } ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m32n8k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m8n32k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 8x32x16_8x32x16_8x32x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } #endif //CUTLASS_ARCH_WMMA_SM70_ENABLED
cutlass/test/unit/gemm/warp/wmma_sm70.cu/0
{ "file_path": "cutlass/test/unit/gemm/warp/wmma_sm70.cu", "repo_id": "cutlass", "token_count": 9424 }
60
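Every test in the wmma_sm70.cu record above follows one recipe: choose a warp tile and a native WMMA instruction tile, fix the element types and layouts, instantiate cutlass::gemm::warp::DefaultMmaTensorOpWmma, and run the warp-level Testbed. The sketch below applies that recipe to a 32x32x16 warp tile; the shape is an assumption chosen for illustration (it is not one of the cases in the file), and the snippet presumes the same includes, test harness, and architecture guard as the tests above.

#if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED)
// Hypothetical extra case: a 32x32x16 warp tile composed of 16x16x16 WMMA ops.
TEST(SM70_warp_wmma_row_col_row_f32, 32x32x16_32x32x16_16x16x16) {
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;
  using ElementA = cutlass::half_t;
  using ElementB = cutlass::half_t;
  using ElementC = float;
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::RowMajor;

  using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma<
      WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type;

  // Threadblock tile equals the warp tile, so a single warp covers the CTA.
  test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 32, 16> >().run();
}
#endif // CUTLASS_ARCH_WMMA_SM70_ENABLED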
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief CUTLASS Library is an object-oriented approach to managing operations implemented by CUTLASS. 
Generally, description - compile-time constant parameters used to instantiate an operation configuration - runtime parameters with computationally expensive initialization arguments - runtime parameters that may be passed to an initialized operation with low computational overhead */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/library/library.h" #include "cutlass/library/arch_mappings.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct NumericTypeMap; template <> struct NumericTypeMap<void> { static NumericTypeID const kId = NumericTypeID::kVoid; }; template <> struct NumericTypeMap<cutlass::uint1b_t> { static NumericTypeID const kId = NumericTypeID::kB1; }; template <> struct NumericTypeMap<cutlass::int4b_t> { static NumericTypeID const kId = NumericTypeID::kS4; }; template <> struct NumericTypeMap<int8_t> { static NumericTypeID const kId = NumericTypeID::kS8; }; template <> struct NumericTypeMap<int16_t> { static NumericTypeID const kId = NumericTypeID::kS16; }; template <> struct NumericTypeMap<int32_t> { static NumericTypeID const kId = NumericTypeID::kS32; }; template <> struct NumericTypeMap<int64_t> { static NumericTypeID const kId = NumericTypeID::kS64; }; template <> struct NumericTypeMap<cutlass::uint4b_t> { static NumericTypeID const kId = NumericTypeID::kU4; }; template <> struct NumericTypeMap<uint8_t> { static NumericTypeID const kId = NumericTypeID::kU8; }; template <> struct NumericTypeMap<cutlass::float_e4m3_t> { static NumericTypeID const kId = NumericTypeID::kFE4M3; }; template <> struct NumericTypeMap<cutlass::float_e5m2_t> { static NumericTypeID const kId = NumericTypeID::kFE5M2; }; template <> struct NumericTypeMap<uint16_t> { static NumericTypeID const kId = NumericTypeID::kU16; }; template <> struct NumericTypeMap<uint32_t> { static NumericTypeID const kId = NumericTypeID::kU32; }; template <> struct NumericTypeMap<uint64_t> { static NumericTypeID const kId = NumericTypeID::kU64; }; template <> struct NumericTypeMap<cutlass::half_t> { static NumericTypeID const kId = NumericTypeID::kF16; }; template <> struct NumericTypeMap<float> { static NumericTypeID const kId = NumericTypeID::kF32; }; template <> struct NumericTypeMap<double> { static NumericTypeID const kId = NumericTypeID::kF64; }; template <> struct NumericTypeMap<cutlass::complex<cutlass::half_t> > { static NumericTypeID const kId = NumericTypeID::kCF16; }; template <> struct NumericTypeMap<cutlass::complex<float> > { static NumericTypeID const kId = NumericTypeID::kCF32; }; template <> struct NumericTypeMap<cutlass::complex<double> > { static NumericTypeID const kId = NumericTypeID::kCF64; }; template <> struct NumericTypeMap<cutlass::bfloat16_t> { static NumericTypeID const kId = NumericTypeID::kBF16; }; template <> struct NumericTypeMap<cutlass::tfloat32_t> { static NumericTypeID const kId = NumericTypeID::kTF32; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct MathOperationMap { static MathOperationID const kId = MathOperationID::kInvalid; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAdd> { static MathOperationID const 
kId = MathOperationID::kMultiplyAdd; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastBF16> { static MathOperationID const kId = MathOperationID::kMultiplyAddFastBF16; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastF16> { static MathOperationID const kId = MathOperationID::kMultiplyAddFastF16; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddSaturate> { static MathOperationID const kId = MathOperationID::kMultiplyAddSaturate; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddMixedInputUpcast> { static MathOperationID const kId = MathOperationID::kMultiplyAddMixedInputUpcast; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddComplex> { static MathOperationID const kId = MathOperationID::kMultiplyAddComplex; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddGaussianComplex> { static MathOperationID const kId = MathOperationID::kMultiplyAddGaussianComplex; }; template <> struct MathOperationMap<cutlass::arch::OpXorPopc> { static MathOperationID const kId = MathOperationID::kXorPopc; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastF32> { static MathOperationID const kId = MathOperationID::kMultiplyAddFastF32; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddComplexFastF32> { static MathOperationID const kId = MathOperationID::kMultiplyAddComplexFastF32; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct LayoutMap; template <> struct LayoutMap<cutlass::layout::ColumnMajor> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajor; }; template <> struct LayoutMap<cutlass::layout::RowMajor> { static LayoutTypeID const kId = LayoutTypeID::kRowMajor; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<2>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK2; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<2>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK2; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<4>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK4; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<4>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK4; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<16>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK16; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<16>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK16; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<32>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK32; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<32>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK32; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<64>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK64; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<64>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK64; }; template <> struct LayoutMap<cutlass::layout::TensorNHWC> { static LayoutTypeID const kId = LayoutTypeID::kTensorNHWC; }; template <> struct LayoutMap<cutlass::layout::TensorNDHWC> { static LayoutTypeID const kId = LayoutTypeID::kTensorNDHWC; }; 
template <> struct LayoutMap<cutlass::layout::TensorNCxHWx<32>> { static LayoutTypeID const kId = LayoutTypeID::kTensorNC32HW32; }; template <> struct LayoutMap<cutlass::layout::TensorNCxHWx<64>> { static LayoutTypeID const kId = LayoutTypeID::kTensorNC64HW64; }; template <> struct LayoutMap<cutlass::layout::TensorCxRSKx<32>> { static LayoutTypeID const kId = LayoutTypeID::kTensorC32RSK32; }; template <> struct LayoutMap<cutlass::layout::TensorCxRSKx<64>> { static LayoutTypeID const kId = LayoutTypeID::kTensorC64RSK64; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct OpcodeClassMap; template <> struct OpcodeClassMap<arch::OpClassSimt> { static OpcodeClassID const kId = OpcodeClassID::kSimt; }; template <> struct OpcodeClassMap<arch::OpClassTensorOp> { static OpcodeClassID const kId = OpcodeClassID::kTensorOp; }; template <> struct OpcodeClassMap<arch::OpClassSparseTensorOp> { static OpcodeClassID const kId = OpcodeClassID::kSparseTensorOp; }; template <> struct OpcodeClassMap<arch::OpClassWmmaTensorOp> { static OpcodeClassID const kId = OpcodeClassID::kWmmaTensorOp; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <cutlass::ComplexTransform Transform> struct ComplexTransformMap; template <> struct ComplexTransformMap<cutlass::ComplexTransform::kNone> { static cutlass::library::ComplexTransform const kId = cutlass::library::ComplexTransform::kNone; }; template <> struct ComplexTransformMap<cutlass::ComplexTransform::kConjugate> { static cutlass::library::ComplexTransform const kId = cutlass::library::ComplexTransform::kConjugate; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <cutlass::conv::Mode T> struct ConvModeMap; template <> struct ConvModeMap<conv::Mode::kCrossCorrelation> { static ConvModeID const kId = ConvModeID::kCrossCorrelation; }; template <> struct ConvModeMap<conv::Mode::kConvolution> { static ConvModeID const kId = ConvModeID::kConvolution; }; template <cutlass::conv::Operator T> struct ConvKindMap; template <> struct ConvKindMap<conv::Operator::kFprop> { static ConvKind const kId = ConvKind::kFprop; }; template <> struct ConvKindMap<conv::Operator::kDgrad> { static ConvKind const kId = ConvKind::kDgrad; }; template <> struct ConvKindMap<conv::Operator::kWgrad> { static ConvKind const kId = ConvKind::kWgrad; }; template <cutlass::conv::IteratorAlgorithm T> struct IteratorAlgorithmMap; template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kAnalytic> { static IteratorAlgorithmID const kId = IteratorAlgorithmID::kAnalytic; }; template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kOptimized> { static IteratorAlgorithmID const kId = IteratorAlgorithmID::kOptimized; }; template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kFixedChannels> { static IteratorAlgorithmID const kId = IteratorAlgorithmID::kFixedChannels; }; template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kFewChannels> { static IteratorAlgorithmID const kId = IteratorAlgorithmID::kFewChannels; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename Layout> TensorDescription make_TensorDescription(int alignment = 1) { TensorDescription desc; desc.element = NumericTypeMap<Element>::kId; desc.layout = LayoutMap<Layout>::kId; desc.alignment = alignment; desc.log_extent_range = 
int(sizeof(typename Layout::TensorCoord::Index) - 1) * 8; desc.log_stride_range = int(sizeof(typename Layout::Stride::Index) - 1) * 8; return desc; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/library/src/library_internal.h/0
{ "file_path": "cutlass/tools/library/src/library_internal.h", "repo_id": "cutlass", "token_count": 3995 }
61
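library_internal.h above is a set of compile-time trait maps (NumericTypeMap, MathOperationMap, LayoutMap, OpcodeClassMap, ...) that collapse CUTLASS template parameters into the runtime enums of the library. A minimal consumption sketch follows; the bare include path is an assumption (the header lives under tools/library/src and is normally consumed internally), and the alignment value is illustrative.

#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "library_internal.h"   // internal header; include path is an assumption

void describe_half_row_major_operand() {
  using namespace cutlass::library;

  // The maps are pure compile-time lookups from C++ types to enum IDs.
  static_assert(NumericTypeMap<cutlass::half_t>::kId == NumericTypeID::kF16,
                "half_t maps to kF16");
  static_assert(LayoutMap<cutlass::layout::RowMajor>::kId == LayoutTypeID::kRowMajor,
                "RowMajor maps to kRowMajor");

  // make_TensorDescription bundles element, layout, and alignment into the
  // runtime TensorDescription consumed by the manifest and profiler.
  TensorDescription desc =
      make_TensorDescription<cutlass::half_t, cutlass::layout::RowMajor>(/*alignment=*/8);
  (void)desc;
}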
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines profiling functionality for convolution */ #pragma once #include <vector> #include <string> #include <memory> #include <algorithm> #include <unordered_map> // CUTLASS Library includes #include "cutlass/library/library.h" #include "cutlass/library/util.h" #include "cutlass/library/handle.h" #include "cutlass/library/manifest.h" #include "cutlass/library/singleton.h" // Profiler includes #include "options.h" #include "device_context.h" #include "operation_profiler.h" #include "performance_result.h" #include "problem_space.h" #include "reduction_operation_profiler.h" #if CUTLASS_ENABLE_CUDNN #include "cudnn_helpers.h" #endif //#if CUTLASS_ENABLE_CUDNN #include "debug.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Abstract base class for each math function class Conv2dOperationProfiler : public OperationProfiler { public: /// Problem structure obtained from problem space struct Conv2dProblem { int64_t n, h, w, c, p, q, k, r, s; int64_t groups; int64_t pad_h, pad_w; int64_t stride_h, stride_w; int64_t dilation_h, dilation_w; std::vector<uint8_t> alpha; std::vector<uint8_t> beta; library::SplitKMode split_k_mode; int64_t split_k_slices; library::ConvModeID conv_mode; library::Provider eq_gemm_provider; // convolution with parallel interleaved reduction // convolution epilogue (alpha, beta) = (1.0, 0.0) // reduction epilogue (alpha, beta) = (Conv2dProblem::alpha, Conv2dProblem::beta) std::vector<uint8_t> alpha_one; std::vector<uint8_t> beta_zero; // // Methods // /// Total number of bytes loaded int64_t 
bytes(library::ConvDescription const &operation_desc) const; /// Total number of flops computed int64_t flops(library::ConvDescription const &operation_desc) const; void set_default_output_size() { p = ((h + pad_h - r * dilation_h) / stride_h) + 1; q = ((w + pad_w - s * dilation_w) / stride_w) + 1; } // Returns equivalent gemm problem size for convolution cutlass::gemm::GemmCoord eq_gemm_size(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return cutlass::gemm::GemmCoord(int(n * p * q), int(k), int(r * s * c / groups)); case library::ConvKind::kDgrad: return cutlass::gemm::GemmCoord(int(n * h * w), int(c), int(k * r * s)); case library::ConvKind::kWgrad: return cutlass::gemm::GemmCoord(int(k), int(r * s * c), int(n * p * q)); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns extent for tensor A std::vector<int> extent_a(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return {int(n), int(h), int(w), int(c)}; case library::ConvKind::kDgrad: return {int(n), int(p), int(q), int(k)}; case library::ConvKind::kWgrad: return {int(n), int(p), int(q), int(k)}; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns extent for tensor B std::vector<int> extent_b(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return {int(k), int(r), int(s), int(c / groups)}; case library::ConvKind::kDgrad: return {int(k), int(r), int(s), int(c)}; case library::ConvKind::kWgrad: return {int(n), int(h), int(w), int(c)}; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns extent for tensor C std::vector<int> extent_c(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return {int(n), int(p), int(q), int(k)}; case library::ConvKind::kDgrad: return {int(n), int(h), int(w), int(c)}; case library::ConvKind::kWgrad: return {int(k), int(r), int(s), int(c)}; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns layout for equivalent gemm matrix A library::LayoutTypeID eq_gemm_layout_a(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return library::LayoutTypeID::kRowMajor; // TN Gemm case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; // NT Gemm default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns layout for equivalent gemm matrix B library::LayoutTypeID eq_gemm_layout_b(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return library::LayoutTypeID::kColumnMajor; // TN Gemm case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm case library::ConvKind::kWgrad: return library::LayoutTypeID::kRowMajor; // NT Gemm default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns layout for equivalent gemm matrix C library::LayoutTypeID eq_gemm_layout_c(library::ConvKind const &conv_kind) const { switch (conv_kind) { // Gemm operator assumes column-major output case library::ConvKind::kFprop: case library::ConvKind::kDgrad: case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; default : throw std::runtime_error("Invalid Conv Operator (fprop, 
dgrad, wgrad)"); } } // Returns leading dimension for equivalent gemm matrix A int64_t eq_gemm_lda(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k(); case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).k(); case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m(); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns leading dimension for equivalent gemm matrix B int64_t eq_gemm_ldb(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k(); case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).n(); case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).n(); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns leading dimension for equivalent gemm matrix C int64_t eq_gemm_ldc(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: case library::ConvKind::kDgrad: case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m(); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } }; /// Workspace used struct Conv2dWorkspace { /// Conv device allocations DeviceAllocation *A; DeviceAllocation *B; DeviceAllocation *reordered_B; DeviceAllocation *C; DeviceAllocation *Computed; DeviceAllocation *Reference; /// Library configuration and arguments for convolution operator library::Conv2dConfiguration configuration; library::ConvArguments arguments; /// Number of copies of the problem workspace which are visited sequentially during /// profiling to avoid camping in the last level cache. int problem_count; /// Buffer used for the cutlass conv2d operations' host workspace std::vector<uint8_t> host_workspace; /// Buffer used for the cutlass operations' device workspace DeviceAllocation device_workspace; /// Library configuration and arguments for reduction operator library::ReductionConfiguration reduction_configuration; library::ReductionArguments reduction_arguments; /// Buffer used for the cutlass reduction operations' host workspace std::vector<uint8_t> reduction_host_workspace; /// Host data buffers for host reference operation /// host buffer for tensor std::vector<uint8_t> host_tensor_a; /// host buffer for tensor b std::vector<uint8_t> host_tensor_b; /// host buffer for tensor c std::vector<uint8_t> host_tensor_c; // // Methods // Conv2dWorkspace() : A(nullptr), B(nullptr), reordered_B(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) {} // Set stride vector for tensor activations, filters, output void set_stride_vector(Conv2dProblem const &problem, library::ConvKind const &conv_kind, library::LayoutTypeID const &layout_a, library::LayoutTypeID const &layout_b, library::LayoutTypeID const &layout_c) { std::vector<int64_t> stride_activations; std::vector<int64_t> stride_filters; std::vector<int64_t> stride_output; // Strides for interleaved fprop if (conv_kind == library::ConvKind::kFprop && ((layout_a == library::LayoutTypeID::kTensorNC32HW32 && layout_b == library::LayoutTypeID::kTensorC32RSK32 && layout_c == library::LayoutTypeID::kTensorNC32HW32) || (layout_a == library::LayoutTypeID::kTensorNC64HW64 && layout_b == library::LayoutTypeID::kTensorC64RSK64 && layout_c == library::LayoutTypeID::kTensorNC64HW64))) { int interleave = (layout_a == library::LayoutTypeID::kTensorNC32HW32) ? 
32 : 64; stride_activations.push_back(int(problem.w) * interleave); stride_activations.push_back(int(problem.w) * int(problem.h) * interleave); stride_activations.push_back(int(problem.h) * int(problem.w) * int(problem.c)); stride_filters.push_back(int(problem.k) * interleave); stride_filters.push_back(int(problem.k) * int(problem.s) * interleave); stride_filters.push_back(int(problem.k) * int(problem.s) * int(problem.r) * interleave); stride_output.push_back(int(problem.q) * interleave); stride_output.push_back(int(problem.q) * int(problem.p) * interleave); stride_output.push_back(int(problem.q) * int(problem.p) * int(problem.k)); } else { // Strides for the rest cases stride_activations.push_back(int(problem.c)); stride_activations.push_back(int(problem.w) * int(problem.c)); stride_activations.push_back(int(problem.h) * int(problem.w) * int(problem.c)); stride_filters.push_back(int(problem.c / problem.groups)); stride_filters.push_back(int(problem.s) * int(problem.c / problem.groups)); stride_filters.push_back(int(problem.r) * int(problem.s) * int(problem.c / problem.groups)); stride_output.push_back(int(problem.k)); stride_output.push_back(int(problem.q) * int(problem.k)); stride_output.push_back(int(problem.q) * int(problem.p) * int(problem.k)); } switch (conv_kind) { case library::ConvKind::kFprop: configuration.stride_a = stride_activations; configuration.stride_b = stride_filters; configuration.stride_c = stride_output; break; case library::ConvKind::kDgrad: configuration.stride_a = stride_output; configuration.stride_b = stride_filters; configuration.stride_c = stride_activations; break; case library::ConvKind::kWgrad: configuration.stride_a = stride_output; configuration.stride_b = stride_activations; configuration.stride_c = stride_filters; break; default: throw std::runtime_error( "Invalid Conv Operator (fprop, dgrad, wgrad)"); } } }; protected: // // Data members // /// CONV problem obtained from problem space Conv2dProblem problem_; /// Device memory allocations Conv2dWorkspace conv_workspace_; /// CUTLASS parallel reduction operation to follow this* conv2d operation library::Operation const *reduction_op_; public: // // Methods // /// Ctor Conv2dOperationProfiler(Options const &options); /// Destructor virtual ~Conv2dOperationProfiler(); Conv2dProblem const& problem() const { return problem_; } /// Prints usage statement for the math function virtual void print_usage(std::ostream &out) const; /// Prints examples virtual void print_examples(std::ostream &out) const; /// Extracts the problem dimensions virtual Status initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Initializes workspace virtual Status initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Verifies CUTLASS against references virtual bool verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Measures performance results virtual bool profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const 
&problem); protected: /// Method to profile an initialized CUTLASS operation virtual Status profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace); /// Initialize reduction problem dimensions and library::Operation bool initialize_reduction_configuration_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Initializes the performance result void initialize_result_( PerformanceResult &result, Options const &options, library::ConvDescription const &operation_desc, ProblemSpace const &problem_space); /// Verifies CUTLASS against host reference bool verify_with_host_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Verifies CUTLASS against device reference bool verify_with_device_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); #if CUTLASS_ENABLE_CUDNN /// Verifies CUTLASS against cudnn reference bool verify_with_cudnn_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); #endif //#if CUTLASS_ENABLE_CUDNN }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h/0
{ "file_path": "cutlass/tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h", "repo_id": "cutlass", "token_count": 6686 }
62
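The Conv2dProblem helpers above (set_default_output_size, eq_gemm_size, extent_a/b/c) encode the implicit-GEMM view of a convolution that the profiler uses to size workspaces and count FLOPs. The standalone sketch below mirrors the kFprop branch of eq_gemm_size() for a made-up problem size; it does not link against the profiler itself.

#include <cstdint>
#include <cstdio>
#include "cutlass/gemm/gemm.h"

int main() {
  // Made-up fprop problem: N,P,Q output extents, K filters of R x S over C channels.
  int64_t n = 8, p = 56, q = 56, k = 64, r = 3, s = 3, c = 32, groups = 1;

  // kFprop maps to GEMM_M = N*P*Q, GEMM_N = K, GEMM_K = R*S*C/groups,
  // matching Conv2dProblem::eq_gemm_size(library::ConvKind::kFprop).
  cutlass::gemm::GemmCoord eq_gemm(int(n * p * q), int(k), int(r * s * c / groups));

  std::printf("implicit GEMM for fprop: %d x %d x %d\n",
              eq_gemm.m(), eq_gemm.n(), eq_gemm.k());
  return 0;
}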
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <stdio.h> #include "cutlass/cutlass.h" /** * \file * \brief C++ interface to dump fragments and shared memory contents for * debugging. */ namespace cutlass { namespace debug { /****************************************************************************** * Dump the fragments ******************************************************************************/ /// The first N threads dump the first M elements from their fragments with a /// stride of S elements. If N is not specified, dump the data of all the /// threads. If M is not specified, dump all the elements of the fragment. 
template <typename Fragment> CUTLASS_DEVICE void dump_fragment(Fragment const& frag, int N = 0, int M = 0, int S = 1) { int total_threads = blockDim.x * blockDim.y * blockDim.z; int block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; if (N < 0 || N > total_threads) { if (thread_id == 0 && block_id == 0) printf("Thread number N = %d should between [1, %d].\n", N, total_threads); __syncthreads(); return; } int total_elements = int(frag.size()); if (M < 0 || M > total_elements) { if (thread_id == 0 && block_id == 0) printf("Element number M = %d should between [1, %d].\n", M, total_elements); __syncthreads(); return; } if (N == 0) N = total_threads; if (M == 0) M = total_elements; if (S < 1 || S > M) { if (thread_id == 0 && block_id == 0) printf("Stride S = %d should between [1, %d].\n", S, M); __syncthreads(); return; } if (thread_id == 0 && block_id == 0) printf("\n*******************Dumping the fragments*******************\n\n"); CUTLASS_PRAGMA_NO_UNROLL for (int tid = 0; tid < N; ++tid) { if (tid == thread_id) { printf("TB%d W%d T%d: ", block_id, tid / 32, tid & 31); CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < M; i += S) { printf("%.0f ", float(typename Fragment::value_type(frag[i]))); } printf("\n"); } __syncthreads(); } if (thread_id == 0 && block_id == 0) printf("\n***********************************************************\n\n"); __syncthreads(); return; } /****************************************************************************** * Dump the shared memory ******************************************************************************/ #define SHMEM_ROW_SIZE 128 /// Dump the shared memory contents. ptr is the begin address, size specifies /// the number of elements that need to be dumped, and S specifies the stride. template <typename Element> CUTLASS_DEVICE void dump_shmem(Element const* ptr, size_t size, int S = 1) { int block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; if (ptr == nullptr) { if (thread_id == 0 && block_id == 0) printf("ptr is null.\n"); __syncthreads(); return; } if (size < 1) { if (thread_id == 0 && block_id == 0) printf("Element size is less than 1\n"); __syncthreads(); return; } int row_elements = SHMEM_ROW_SIZE / sizeof(Element); if (S < 1 || S > row_elements) { if (thread_id == 0 && block_id == 0) printf("Stride S = %d should between [1, %d].\n", S, row_elements); __syncthreads(); return; } __syncthreads(); if (thread_id == 0) printf("\n********Dumping the shared memory of TB %d*******\n\n", block_id); if (thread_id == 0) { for (int i = 0; i < size; i += row_elements) { for (int j = 0; j < row_elements; j += S) { printf("%.0f ", float(ptr[i + j])); } printf("\n"); } } if (thread_id == 0) printf("\n***********************************************************\n\n"); __syncthreads(); return; } } // namespace debug } // namespace cutlass
cutlass/tools/util/include/cutlass/util/device_dump.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/device_dump.h", "repo_id": "cutlass", "token_count": 2057 }
63
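dump_fragment() and dump_shmem() above are collective, device-side debugging aids: every thread of the block must reach them because they synchronize internally. The sketch below shows how they would typically be dropped into a kernel; the fragment, pointer, and parameter values are placeholders.

#include "cutlass/util/device_dump.h"

template <typename Fragment, typename Element>
__device__ void debug_dump_example(Fragment const &frag,
                                   Element const *smem_ptr,
                                   size_t smem_elements) {
  // First 32 threads print every 4th element of their fragments
  // (N = 32 threads, M = 0 means "all elements", S = 4 stride).
  cutlass::debug::dump_fragment(frag, /*N=*/32, /*M=*/0, /*S=*/4);

  // Thread 0 of the block prints the shared-memory tile row by row.
  cutlass::debug::dump_shmem(smem_ptr, smem_elements);
}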
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /*! \file \brief HostTensor contributes management for both host and device memory. HostTensor allocates host and device memory upon construction. Basic element-wise operations on host memory synchronize device memory automatically. Explicit copy operations provide abstractions for CUDA memcpy operations. Call {host, device}_{data, ref, view}() for accessing host or device memory. See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details. 
*/ #include <vector> #include "cutlass/cutlass.h" #include "cutlass/tensor_ref_planar_complex.h" #include "cutlass/tensor_view_planar_complex.h" #include "device_memory.h" namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// /// Host tensor template < /// Data type of element stored within tensor (concept: NumericType) typename Element_, /// Defines a mapping from logical coordinate to linear memory (concept: Layout) typename Layout_ > class HostTensorPlanarComplex { public: /// Data type of individual access using Element = Element_; /// Mapping function from logical coordinate to linear memory using Layout = Layout_; /// Logical rank of tensor index space static int const kRank = Layout::kRank; /// Index type using Index = typename Layout::Index; /// Long index used for pointer offsets using LongIndex = typename Layout::LongIndex; /// Coordinate in logical tensor space using TensorCoord = typename Layout::TensorCoord; /// Layout's stride vector using Stride = typename Layout::Stride; /// Tensor reference to device memory using TensorRef = TensorRefPlanarComplex<Element, Layout>; /// Tensor reference to constant device memory using ConstTensorRef = typename TensorRef::ConstTensorRef; /// Tensor reference to device memory using TensorView = TensorViewPlanarComplex<Element, Layout>; /// Tensor reference to constant device memory using ConstTensorView = typename TensorView::ConstTensorView; /// Reference to element in tensor using Reference = typename TensorRef::Reference; /// Constant reference to element in tensor using ConstReference = typename ConstTensorRef::Reference; private: // // Data members // /// Extent of tensor in logical dimensions TensorCoord extent_; /// Layout object Layout layout_; /// Host-side memory allocation std::vector<Element> host_; /// Device-side memory device_memory::allocation<Element> device_; public: // // Device and Host Methods // /// Default constructor HostTensorPlanarComplex() {} /// Constructs a tensor given an extent. Assumes a packed layout HostTensorPlanarComplex( TensorCoord const &extent, bool device_backed = true ) { this->reset(extent, Layout::packed(extent), device_backed); } /// Constructs a tensor given an extent and layout HostTensorPlanarComplex( TensorCoord const &extent, Layout const &layout, bool device_backed = true ) { this->reset(extent, layout, device_backed); } ~HostTensorPlanarComplex() { } /// Clears the HostTensor allocation to size/capacity = 0 void reset() { extent_ = TensorCoord(); layout_ = Layout::packed(extent_); host_.clear(); device_.reset(); } /// Resizes internal memory allocations without affecting layout or extent void reserve( size_t count, ///< size of tensor in elements bool device_backed_ = true) { ///< if true, device memory is also allocated device_.reset(); host_.clear(); host_.resize(count * 2); // Allocate memory Element* device_memory = nullptr; if (device_backed_) { device_memory = device_memory::allocate<Element>(count * 2); } device_.reset(device_memory, device_backed_ ? count * 2 : 0); } /// Updates the extent and layout of the HostTensor. Allocates memory according to the new /// extent and layout. void reset( TensorCoord const &extent, ///< extent of logical tensor Layout const &layout, ///< layout object of tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. 
extent_ = extent; layout_ = layout; reserve(size_t(layout_.capacity(extent_)), device_backed_); } /// Updates the extent and layout of the HostTensor. Allocates memory according to the new /// extent and layout. Assumes a packed tensor configuration. void reset( TensorCoord const &extent, ///< extent of logical tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. reset(extent, Layout::packed(extent), device_backed_); } /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. /// To force allocation, call reset(). void resize( TensorCoord const &extent, ///< extent of logical tensor Layout const &layout, ///< layout object of tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. extent_ = extent; layout_ = layout; LongIndex new_size = size_t(layout_.capacity(extent_)); if (static_cast<decltype(host_.size())>(new_size * 2) > host_.size()) { reserve(new_size); } } /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. /// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration. void resize( TensorCoord const &extent, ///< extent of logical tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. resize(extent, Layout::packed(extent), device_backed_); } /// Returns the number of elements stored in the host tensor size_t size() const { return host_.size() / 2; } /// Returns the logical capacity based on extent and layout. May differ from size(). LongIndex capacity() const { return layout_.capacity(extent_); } /// Stride between real and imaginary parts LongIndex imaginary_stride() const { return host_.size() / 2; } /// Gets pointer to host data Element * host_data() { return host_.data(); } /// Gets pointer to host data imaginary part Element * host_data_imag() { return host_.data() + imaginary_stride(); } /// Gets pointer to host data with a pointer offset Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return host_data() + ptr_element_offset; } /// Gets pointer to host data with a pointer offset Element * host_data_imag_ptr_offset(LongIndex ptr_element_offset) { return host_data_imag() + ptr_element_offset; } /// Gets a reference to an element in host memory Reference host_data(LongIndex idx) { return PlanarComplexReference<Element>(host_data() + idx, host_data_imag() + idx); } /// Gets pointer to host data Element const * host_data() const { return host_.data(); } /// Gets pointer to host data imaginary part Element const * host_data_imag() const { return host_.data() + imaginary_stride(); } /// Gets a constant reference to an element in host memory ConstReference host_data(LongIndex idx) const { return PlanarComplexReference<Element const>(host_data() + idx, host_data_imag() + idx); } /// Gets pointer to device data Element * device_data() { return device_.get(); } /// Gets pointer to device data with a pointer offset Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return device_.get() + ptr_element_offset; } /// Gets pointer to device data Element const * device_data() const { return device_.get(); } /// Gets pointer to device data with a pointer offset Element const * device_data_ptr_offset(LongIndex ptr_element_offset) const { return device_.get() + ptr_element_offset; } /// Gets a pointer to the device data imaginary part Element * device_data_imag() { return device_.get() + imaginary_stride(); } /// Accesses the tensor 
reference pointing to data TensorRef host_ref(LongIndex ptr_element_offset=0) { return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); } /// Returns a tensor reference to the real part of the tensor cutlass::TensorRef<Element, Layout> host_ref_real() { return cutlass::TensorRef<Element, Layout>(host_data(), layout_); } /// Returns a tensor reference to the real part of the tensor cutlass::TensorRef<Element, Layout> host_ref_imag() { return cutlass::TensorRef<Element, Layout>(host_data_ptr_offset(imaginary_stride()), layout_); } /// Accesses the tensor reference pointing to data ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const { return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); } /// Accesses the tensor reference pointing to data TensorRef device_ref(LongIndex ptr_element_offset=0) { return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); } /// Accesses the tensor reference pointing to data ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const { return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); } /// Returns a tensor reference to the real part of the tensor cutlass::TensorRef<Element, Layout> device_ref_real() { return cutlass::TensorRef<Element, Layout>(device_data(), layout_); } /// Returns a tensor reference to the real part of the tensor cutlass::TensorRef<Element, Layout> device_ref_imag() { return cutlass::TensorRef<Element, Layout>(device_data_ptr_offset(imaginary_stride()), layout_); } /// Accesses the tensor reference pointing to data TensorView host_view(LongIndex ptr_element_offset=0) { return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); } /// Accesses the tensor reference pointing to data ConstTensorView host_view(LongIndex ptr_element_offset=0) const { return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); } /// Accesses the tensor reference pointing to data cutlass::TensorView<Element, Layout> host_view_real() { return cutlass::TensorView<Element, Layout>(host_data(), layout_, extent_); } /// Accesses the tensor reference pointing to data cutlass::TensorView<Element, Layout> host_view_imag() { return cutlass::TensorView<Element, Layout>(host_data_ptr_offset(imaginary_stride()), layout_, extent_); } /// Accesses the tensor reference pointing to data TensorView device_view(LongIndex ptr_element_offset=0) { return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); } /// Accesses the tensor reference pointing to data ConstTensorView device_view(LongIndex ptr_element_offset=0) const { return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); } /// Accesses the tensor reference pointing to data cutlass::TensorView<Element, Layout> device_view_real() { return cutlass::TensorView<Element, Layout>(device_data(), layout_, extent_); } /// Accesses the tensor reference pointing to data cutlass::TensorView<Element, Layout> device_view_imag() { return cutlass::TensorView<Element, Layout>(device_data_ptr_offset(imaginary_stride()), layout_, extent_); } /// Returns true if device memory is allocated bool device_backed() const { return (device_.get() == nullptr) ? 
false : true; } /// Returns the layout object Layout layout() const { return layout_; } /// Returns the layout object's stride vector Stride stride() const { return layout_.stride(); } /// Returns the layout object's stride in a given physical dimension Index stride(int dim) const { return layout_.stride().at(dim); } /// Computes the offset of an index from the origin of the tensor LongIndex offset(TensorCoord const& coord) const { return layout_(coord); } /// Returns a reference to the element at the logical Coord in host memory Reference at(TensorCoord const& coord) { return host_data(offset(coord)); } /// Returns a const reference to the element at the logical Coord in host memory ConstReference at(TensorCoord const& coord) const { return host_data(offset(coord)); } /// Returns the extent of the tensor TensorCoord extent() const { return extent_; } /// Returns the extent of the tensor TensorCoord & extent() { return extent_; } /// Copies data from device to host void sync_host() { if (device_backed()) { device_memory::copy_to_host( host_data(), device_data(), imaginary_stride() * 2); } } /// Copies data from host to device void sync_device() { if (device_backed()) { device_memory::copy_to_device( device_data(), host_data(), imaginary_stride() * 2); } } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_device_to_host( Element const* ptr_device_real, ///< source device memory Element const* ptr_device_imag, ///< source device memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_to_host( host_data(), ptr_device_real, count); device_memory::copy_to_host( host_data_imag(), ptr_device_imag, count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_device_to_device( Element const* ptr_device_real, ///< source device memory Element const* ptr_device_imag, ///< source device memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_device_to_device( device_data(), ptr_device_real, count); device_memory::copy_device_to_device( device_data_imag(), ptr_device_imag, count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_host_to_device( Element const* ptr_host_real, ///< source host memory Element const* ptr_host_imag, ///< source host memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_to_device( device_data(), ptr_host_real, count); device_memory::copy_to_device( device_data_imag(), ptr_host_imag, count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_host_to_host( Element const* ptr_host_real, ///< source host memory Element const* ptr_host_imag, ///< source host memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_host_to_host( host_data(), ptr_host_real, count); device_memory::copy_host_to_host( host_data_imag(), ptr_host_imag, count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_device_to_host( Element * ptr_host_real, ///< source device memory Element * ptr_host_imag, ///< source device memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_to_host( ptr_host_real, device_data(), count); device_memory::copy_to_host( ptr_host_imag, device_data_imag(), count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_device_to_device( Element * ptr_device_real, ///< source device memory Element * ptr_device_imag, ///< source device memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_device_to_device( ptr_device_real, device_data(), count); device_memory::copy_device_to_device( ptr_device_imag, device_data_imag(), count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_host_to_device( Element * ptr_device_real, ///< source device memory Element * ptr_device_imag, ///< source device memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_to_device( ptr_device_real, host_data(), count); device_memory::copy_to_device( ptr_device_imag, host_data_imag(), count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_host_to_host( Element * ptr_host_real, ///< source host memory Element * ptr_host_imag, ///< source host memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_host_to_host( ptr_host_real, host_data(), count); device_memory::copy_host_to_host( ptr_host_imag, host_data_imag(), count); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
cutlass/tools/util/include/cutlass/util/host_tensor_planar_complex.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/host_tensor_planar_complex.h", "repo_id": "cutlass", "token_count": 6640 }
64
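HostTensorPlanarComplex above stores the real and imaginary planes back to back (the imaginary plane begins imaginary_stride() elements after the real plane) and mirrors both between host and device. A minimal usage sketch with made-up extents, using only the accessors shown in the header:

#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor_planar_complex.h"

int main() {
  using Tensor = cutlass::HostTensorPlanarComplex<float, cutlass::layout::ColumnMajor>;

  // Packed 128x64 planar-complex matrix; host and device memory are allocated.
  Tensor tensor({128, 64});

  // Write element (0,0): real and imaginary parts live in separate planes.
  tensor.host_data()[0] = 1.0f;          // real plane
  tensor.host_data_imag()[0] = -1.0f;    // imaginary plane

  // Mirror both planes into the device allocation.
  tensor.sync_device();
  return 0;
}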
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines host-side elementwise operations on TensorView. 
*/ #pragma once // Standard Library includes #include <utility> // Cutlass includes #include "cutlass/cutlass.h" #include "cutlass/relatively_equal.h" #include "cutlass/util/distribution.h" #include "tensor_foreach.h" namespace cutlass { namespace reference { namespace device { /////////////////////////////////////////////////////////////////////////////////////////////////// namespace kernel { template <typename Element> __global__ void BlockCompareEqual( int *equal, Element const *ptr_A, Element const *ptr_B, size_t capacity) { size_t idx = threadIdx.x + blockDim.x * blockIdx.x; for (; idx < capacity; idx += gridDim.x * blockDim.x) { Element a = cutlass::ReferenceFactory<Element>::get(ptr_A, idx); Element b = cutlass::ReferenceFactory<Element>::get(ptr_B, idx); if (a != b) { *equal = 0; return; } } } template <typename Element> __global__ void BlockCompareRelativelyEqual( int *equal, Element const *ptr_A, Element const *ptr_B, size_t capacity, Element epsilon, Element nonzero_floor) { size_t idx = threadIdx.x + blockDim.x * blockIdx.x; for (; idx < capacity; idx += gridDim.x * blockDim.x) { Element a = cutlass::ReferenceFactory<Element>::get(ptr_A, idx); Element b = cutlass::ReferenceFactory<Element>::get(ptr_B, idx); if (!relatively_equal(a, b, epsilon, nonzero_floor)) { *equal = 0; return; } } } } // namespace kernel /////////////////////////////////////////////////////////////////////////////////////////////////// /// Performs a bit-level equality check between two blocks template <typename Element> bool BlockCompareEqual( Element const *ptr_A, Element const *ptr_B, size_t capacity, int grid_size = 0, int block_size = 0) { int equal_flag = 1; int *device_equal_flag = nullptr; if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) { throw std::runtime_error("Failed to allocate device flag."); } if (cudaMemcpy( device_equal_flag, &equal_flag, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) { throw std::runtime_error("Failed to copy equality flag to device."); } if (!grid_size || !block_size) { // if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API cudaError_t result = cudaOccupancyMaxPotentialBlockSize( &grid_size, &block_size, reinterpret_cast<void const *>(kernel::BlockCompareEqual<Element>)); if (result != cudaSuccess) { throw std::runtime_error("Failed to query occupancy."); } // Limit block size. This has the effect of increasing the number of items processed by a // single thread and reduces the impact of initialization overhead. block_size = (block_size < 128 ? 
block_size : 128); } dim3 grid(grid_size, 1, 1); dim3 block(block_size, 1, 1); kernel::BlockCompareEqual<Element><<< grid, block >>>(device_equal_flag, ptr_A, ptr_B, capacity); if (cudaMemcpy( &equal_flag, device_equal_flag, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(device_equal_flag); throw std::runtime_error("Failed to copy equality flag from device."); } cudaFree(device_equal_flag); return equal_flag; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Performs an elementwise relative-equality check between two blocks template <typename Element> bool BlockCompareRelativelyEqual( Element const *ptr_A, Element const *ptr_B, size_t capacity, Element epsilon, Element nonzero_floor, int grid_size = 0, int block_size = 0) { int equal_flag = 1; int *device_equal_flag = nullptr; if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) { throw std::runtime_error("Failed to allocate device flag."); } if (cudaMemcpy( device_equal_flag, &equal_flag, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) { throw std::runtime_error("Failed to copy equality flag to device."); } if (!grid_size || !block_size) { // if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API cudaError_t result = cudaOccupancyMaxPotentialBlockSize( &grid_size, &block_size, reinterpret_cast<void const *>(kernel::BlockCompareRelativelyEqual<Element>)); if (result != cudaSuccess) { throw std::runtime_error("Failed to query occupancy."); } // Limit block size. This has the effect of increasing the number of items processed by a // single thread and reduces the impact of initialization overhead. block_size = (block_size < 128 ? block_size : 128); } dim3 grid(grid_size, 1, 1); dim3 block(block_size, 1, 1); kernel::BlockCompareRelativelyEqual<Element><<< grid, block >>>( device_equal_flag, ptr_A, ptr_B, capacity, epsilon, nonzero_floor ); if (cudaMemcpy( &equal_flag, device_equal_flag, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(device_equal_flag); throw std::runtime_error("Failed to copy equality flag from device."); } cudaFree(device_equal_flag); return equal_flag; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // device } // reference } // cutlass
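///////////////////////////////////////////////////////////////////////////////////////////////////

// Usage sketch (added for exposition; not part of the original CUTLASS header). It illustrates how
// the two helpers defined above might be called on raw device buffers. The buffer size, epsilon,
// and nonzero floor below are arbitrary assumptions chosen for the example.
namespace cutlass {
namespace reference {
namespace device {
namespace example {

inline bool run_block_compare_sketch() {

  size_t const kCapacity = 1024;

  float *ptr_A = nullptr;
  float *ptr_B = nullptr;

  if (cudaMalloc((void **)&ptr_A, kCapacity * sizeof(float)) != cudaSuccess ||
      cudaMalloc((void **)&ptr_B, kCapacity * sizeof(float)) != cudaSuccess) {
    return false;
  }

  // Zero-initialize both buffers so the comparisons trivially succeed.
  cudaMemset(ptr_A, 0, kCapacity * sizeof(float));
  cudaMemset(ptr_B, 0, kCapacity * sizeof(float));

  // Bit-level comparison; grid/block sizes are chosen automatically via the occupancy API.
  bool equal = BlockCompareEqual<float>(ptr_A, ptr_B, kCapacity);

  // Relative comparison using an assumed epsilon and nonzero floor.
  bool approx_equal = BlockCompareRelativelyEqual<float>(ptr_A, ptr_B, kCapacity, 1e-5f, 1e-6f);

  cudaFree(ptr_A);
  cudaFree(ptr_B);

  return equal && approx_equal;
}

} // namespace example
} // namespace device
} // namespace reference
} // namespace cutlass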
cutlass/tools/util/include/cutlass/util/reference/device/tensor_compare.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/device/tensor_compare.h", "repo_id": "cutlass", "token_count": 2337 }
65
![ALT](./media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS") [README](./README.md#documentation) > **Contributors** # CUTLASS Developers and Contributors This is the official list of CUTLASS developers and contributors. ## DEVELOPERS Vijay Thakkar<br /> Pradeep Ramani<br /> Cris Cecka<br /> Aniket Shivam<br /> Jack Kosaian<br /> Mark Hoemmen<br /> Richard Cai<br /> Honghao Lu<br /> Ethan Yan<br /> Haicheng Wu<br /> Andrew Kerr<br /> Dustyn Blasig<br /> Fengqi Qiao<br /> Duane Merrill<br /> Yujia Zhai<br /> Rawn Henry<br /> Sergey Klevtsov<br /> Shang Zhang<br /> Piotr Majcher<br /> Paul Springer<br /> Markus Hohnerbach<br /> Jin Wang<br /> Aditya Atluri<br /> ## CuTe Cris Cecka<br /> Vijay Thakkar<br /> ## CUTLASS Product Manager Matthew Nicely<br /> ## Former CUTLASS Developers Manish Gupta<br /> Naila Farooqui<br /> David Tanner<br /> Manikandan Ananth<br /> Zhaodong Chen<br /> Chinmay Talegaonkar<br /> ## CONTRIBUTORS Timothy Costa<br /> Julien Demouth<br /> Brian Fahs<br /> Michael Garland<br /> Michael Goldfarb<br /> Mostafa Hagog<br /> Fei Hu<br /> Alan Kaatz<br /> Tina Li<br /> Timmy Liu<br /> Wei Liu<br /> Tim Martin<br /> Duane Merrill<br /> Kevin Siu<br /> Markus Tavenrath<br /> John Tran<br /> Vicki Wang<br /> Junkai Wu<br /> Fung Xie<br /> Albert Xu<br /> Yang Xu<br /> Jack Yang<br /> Scott Yokim<br /> Xiuxia Zhang<br /> Nick Zhao<br /> ## ACKNOWLEDGEMENTS Girish Bharambe<br /> Luke Durant<br /> Carter Edwards<br /> Olivier Giroux<br /> Stephen Jones<br /> Rishkul Kulkarni<br /> Bryce Lelbach<br /> Joel McCormack<br /> Kyrylo Perelygin<br /> Sean Treichler<br />
cutlass/CONTRIBUTORS.md/0
{ "file_path": "cutlass/CONTRIBUTORS.md", "repo_id": "cutlass", "token_count": 657 }
0
theme: jekyll-theme-minimal
cutlass/docs/_config.yml/0
{ "file_path": "cutlass/docs/_config.yml", "repo_id": "cutlass", "token_count": 10 }
1
var searchData= [ ['floatroundstyle',['FloatRoundStyle',['../namespacecutlass.html#aabe6b8ce223bf05f65a4721a3f5447a6',1,'cutlass']]] ];
cutlass/docs/search/enums_1.js/0
{ "file_path": "cutlass/docs/search/enums_1.js", "repo_id": "cutlass", "token_count": 59 }
2
var searchData= [ ['gaussian',['Gaussian',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92aa39890d8be86d514207259b1b5dca3ed5',1,'cutlass::Distribution']]] ];
cutlass/docs/search/enumvalues_0.js/0
{ "file_path": "cutlass/docs/search/enumvalues_0.js", "repo_id": "cutlass", "token_count": 82 }
3
var searchData= [ ['batched_5freduction_2eh',['batched_reduction.h',['../batched__reduction_8h.html',1,'']]], ['batched_5freduction_5ftraits_2eh',['batched_reduction_traits.h',['../batched__reduction__traits_8h.html',1,'']]] ];
cutlass/docs/search/files_1.js/0
{ "file_path": "cutlass/docs/search/files_1.js", "repo_id": "cutlass", "token_count": 97 }
4
var searchData= [ ['fast_5fmath_2eh',['fast_math.h',['../fast__math_8h.html',1,'']]], ['fragment_5fiterator_5fcomplex_5ftensor_5fop_2eh',['fragment_iterator_complex_tensor_op.h',['../fragment__iterator__complex__tensor__op_8h.html',1,'']]], ['fragment_5fiterator_5fsimt_2eh',['fragment_iterator_simt.h',['../fragment__iterator__simt_8h.html',1,'']]], ['fragment_5fiterator_5ftensor_5fop_2eh',['fragment_iterator_tensor_op.h',['../fragment__iterator__tensor__op_8h.html',1,'']]], ['fragment_5fiterator_5fvolta_5ftensor_5fop_2eh',['fragment_iterator_volta_tensor_op.h',['../fragment__iterator__volta__tensor__op_8h.html',1,'']]], ['fragment_5fiterator_5fwmma_5ftensor_5fop_2eh',['fragment_iterator_wmma_tensor_op.h',['../fragment__iterator__wmma__tensor__op_8h.html',1,'']]], ['functional_2eh',['functional.h',['../functional_8h.html',1,'']]] ];
cutlass/docs/search/files_5.js/0
{ "file_path": "cutlass/docs/search/files_5.js", "repo_id": "cutlass", "token_count": 381 }
5
var searchData= [ ['output_5ftile_5fthread_5fmap_2eh',['output_tile_thread_map.h',['../output__tile__thread__map_8h.html',1,'']]] ];
cutlass/docs/search/files_d.js/0
{ "file_path": "cutlass/docs/search/files_d.js", "repo_id": "cutlass", "token_count": 59 }
6
var indexSectionsWithContent = { 0: "_abcdefghiklmnopqrstuvwxy~", 1: "abcdefghiklmnoprstuvwx", 2: "c", 3: "abcdefghiklmnoprstvw", 4: "_abcdefghiklmnopqrstuvw~", 5: "abcdefghiklmnoprstuvw", 6: "abcdefghiklmnoprstuvwy", 7: "cfgklmnos", 8: "gikrsuv", 9: "_cns", 10: "p" }; var indexSectionNames = { 0: "all", 1: "classes", 2: "namespaces", 3: "files", 4: "functions", 5: "variables", 6: "typedefs", 7: "enums", 8: "enumvalues", 9: "defines", 10: "groups" }; var indexSectionLabels = { 0: "All", 1: "Classes", 2: "Namespaces", 3: "Files", 4: "Functions", 5: "Variables", 6: "Typedefs", 7: "Enumerations", 8: "Enumerator", 9: "Macros", 10: "Modules" };
cutlass/docs/search/searchdata.js/0
{ "file_path": "cutlass/docs/search/searchdata.js", "repo_id": "cutlass", "token_count": 362 }
7
var searchData= [ ['yes',['yes',['../structcutlass_1_1platform_1_1is__base__of__helper.html#ac1cf3f804e7686213fd42c678cc6d669',1,'cutlass::platform::is_base_of_helper']]] ];
cutlass/docs/search/typedefs_15.js/0
{ "file_path": "cutlass/docs/search/typedefs_15.js", "repo_id": "cutlass", "token_count": 79 }
8
var searchData= [ ['kernelclass',['KernelClass',['../structcutlass_1_1reduction_1_1BatchedReductionTraits.html#a085c72d54426f5eb60f5bffa9c383229',1,'cutlass::reduction::BatchedReductionTraits']]] ];
cutlass/docs/search/typedefs_9.js/0
{ "file_path": "cutlass/docs/search/typedefs_9.js", "repo_id": "cutlass", "token_count": 83 }
9
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <iostream> #include <string> #include <vector> #include <cuda_runtime.h> // Run tests on GPUs int testRun(int arch, std::vector<bool (*)()> & test_funcs, const std::string & test_name) { bool supported = false; int arch_major = arch / 10; int arch_minor = arch - arch / 10 * 10; if(arch_major >= 8) { // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0)) { supported = true; } } else if(arch_major >= 7) { // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. if (__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)) { supported = true; } } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!(props.major == arch_major && props.minor == arch_minor)) { supported = false; } if (!supported) { // Returning zero so this test passes on older Toolkits. Its action is a no-op. std::cout << "This example isn't supported on the current architecture" << std::endl; return 0; } bool pass = true; std::cout << "Device: " << props.name << std::endl; std::cout << "Arch: SM" << arch << std::endl; std::cout << "Test: " << test_name << std::endl; for(auto func : test_funcs) { pass &= func(); } if(pass) return 0; else return -1; }
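///////////////////////////////////////////////////////////////////////////////////////////////////

// Usage sketch (added for exposition; not part of the original example header). A hypothetical
// test function is registered in a vector and dispatched through testRun() for an SM80 target.
// The test body below is a placeholder assumption rather than real CUTLASS verification code.

inline bool example_smoke_test() {
  // A real test would configure, launch, and verify a CUTLASS kernel here.
  return true;
}

inline int example_main() {
  std::vector<bool (*)()> test_funcs = { &example_smoke_test };
  return testRun(80, test_funcs, "example: trivial smoke test");
}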
cutlass/examples/13_two_tensor_op_fusion/test_run.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/test_run.h", "repo_id": "cutlass", "token_count": 1129 }
10
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using Element = cutlass::Quaternion<float>; using ElementAccumulator = Element; // Data type of accumulator using ElementComputeEpilogue = Element; // Data type of epilogue computation (alpha, beta) using ElementInputA = Element; // Data type of elements in input tensor using ElementInputB = Element; // Data type of elements in input tensor using ElementOutput = Element; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm50; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; // Threadblock tile shape // 
This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // SIMT instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 2; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in linear combination using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm >::Kernel; using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(false), measure_performance(true), iterations(20), save_workspace(false), alpha(1), beta(0), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
// int const kAlignment = 8; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Updates input and filter sizes void update( cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) { this->input_size = input_size; this->filter_size = filter_size; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha_w", alpha.w()); cmd.get_cmd_line_argument("alpha_x", alpha.x()); cmd.get_cmd_line_argument("alpha_y", alpha.y()); cmd.get_cmd_line_argument("alpha_z", alpha.z()); cmd.get_cmd_line_argument("beta_w", beta.w()); cmd.get_cmd_line_argument("beta_x", beta.x()); cmd.get_cmd_line_argument("beta_y", beta.y()); cmd.get_cmd_line_argument("beta_z", beta.z()); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "22_quaternion_conv example\n\n" << " This example uses CUTLASS SIMT operators on quaternion (4 x float) data to compute\n" << " forward convolution on tensors of layout NHWC.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --n=<int> Input tensor extent N\n" << " --h=<int> Input tensor extent H\n" << " --w=<int> Input tensor extent W\n" << " --c=<int> Input tensor extent C\n" << " --k=<int> Filter extent K\n" << " --r=<int> Filter extent R\n" << " --s=<int> Filter extent S\n\n" << " --alpha_w,--alpha_x,--alpha_y,--alpha_z=<float> Quaternion components of epilogue scalar alpha\n" << " --beta_w,--beta_x,--beta_y,--beta_z=<float> Quaternion components of epilogue scalar beta\n\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/22_quaternion_conv/22_quaternion_conv --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n" << "$ ./examples/22_quaternion_conv/22_quaternion_conv --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord( input_size.n(), (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds = NPQK * CRS * 16 (each quaternion multiply-add expands to 16 real multiply-adds) int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()) * 16; // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << runtime_ms << "," << gflops; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile_convolution(Options const &options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. 
// cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size()); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c(options.output_size()); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, 7, -8, 0); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, 7, -8, 0); // Fill tensor C on host with zeros cutlass::reference::host::TensorFill( tensor_c.host_view()); // Fill tensor C for reference on host with zeros cutlass::reference::host::TensorFill( tensor_ref_c.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_ref_c.sync_device(); // // Define arguments for CUTLASS Convolution // cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split K dimension into 1 partitions int split_k_slices = 1; // Construct Conv2dProblemSize with user defined output size cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices ); // Construct ImplicitGemm::Argument structure with conv2d // problem size, data pointers, and epilogue values typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_c.device_ref(), {options.alpha, options.beta}, }; // // Initialize CUTLASS Convolution // ImplicitGemm implicit_gemm_op; size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm_op.can_implement(arguments); CUTLASS_CHECK(result.status); result.status = implicit_gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); // // Optional reference check // if (options.reference_check) { std::cout << "Verification on host...\n"; // Compute with reference implementation cutlass::reference::host::Conv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator >( problem_size, tensor_a.host_ref(), tensor_b.host_ref(), tensor_c.host_ref(), tensor_ref_c.host_ref(), options.alpha, options.beta ); // Check if output from CUTLASS kernel and reference kernel are equal or not tensor_c.sync_host(); bool passed = cutlass::reference::host::TensorEquals( tensor_c.host_view(), tensor_ref_c.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "22_quaternion_conv_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; 
std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {1, 32, 64, 128, 256, 512}; struct Benchmark { int h, w, c, k, r, s; } layers[] = { {56, 56, 64, 256, 1, 1}, {56, 56, 64, 64, 1, 1}, {56, 56, 64, 64, 3, 3}, {56, 56, 256, 64, 1, 1}, {56, 56, 256, 512, 1, 1}, {56, 56, 256, 128, 1, 1}, {28, 28, 128, 128, 3, 3}, {28, 28, 128, 512, 1, 1}, {28, 28, 512, 128, 1, 1}, {28, 28, 512, 1024, 1, 1}, {28, 28, 512, 256, 1, 1}, {14, 14, 256, 256, 3, 3}, {14, 14, 256, 1024, 1, 1}, {14, 14, 1024, 256, 1, 1}, {14, 14, 1024, 2048, 1, 1}, {14, 14, 1024, 512, 1, 1}, {7, 7, 512, 512, 3, 3}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const &layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}); Result result = profile_convolution(options); result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." 
<< std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/22_quaternion_conv/quaternion_conv.cu/0
{ "file_path": "cutlass/examples/22_quaternion_conv/quaternion_conv.cu", "repo_id": "cutlass", "token_count": 8258 }
11
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example shows how to compute conv2d gradient with respect to weight (wgrad). In wgrad, the K dimension of implicit GEMM, corresponding to the sequential reduction loop, is very large (N * P * Q). Split-k with parallel reduction is highly effective for such cases. Given the split_k_slices parameter, it partitions the K loop into split_k_slices chunks and computes partial reductions in parallel across different blocks. After that, a parallel reduction kernel is launched to accumulate partial reductions. In practice, wgrad requires fp32 accumulation to avoid overflow. When the input is fp16, some care is needed to correctly instantiate the GEMM template. */ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_wgrad.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "helper.h" // The code section below describes the data types for the input and output tensors and the computation between // elements // In Wgrad, fp32 accumulation is necessary in practice. 
using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta) using ElementInputA = cutlass::half_t; // Data type of elements in input tensor using ElementInputB = cutlass::half_t; // Data type of elements in input tensor using ElementOutput = cutlass::half_t; // Data type of elements in output tensor using ElementC = ElementOutput; using ElementCompute = ElementComputeEpilogue; using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 3; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; // We need two epilogue functors - one for GEMM and another for the final reduction. // The epilogue for GEMM is not used, but needed to instantiate the CUTLASS kernel template. // Note that, when the input is fp16 and accumulation is fp32, the output of GEMM needs to be fp32, // the final reduction is done in fp32, and the reduction epilogue converts fp32 outputs to fp16. // Therefore, the output type of the GEMM epilogue is ElementCompute, not ElementOutput. // This code section describes the epilogue part of the kernel, we use default value using EpilogueOpGEMM = cutlass::epilogue::thread::LinearCombination< ElementCompute, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementCompute>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in linear combination // The epilogue functor for reduction. This is the one that is actually used. using EpilogueOpReduction = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in lin using Conv2dWgradKernel = typename cutlass::conv::kernel::DefaultConv2dWgrad< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementAccumulator, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOpGEMM, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm >::Kernel; using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dWgradKernel>; using EpilogueOutputOp = EpilogueOpReduction; /// Reduction kernel using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp >; using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>; using ReductionStrideIndex = typename ReductionDevice::StrideIndex; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; int split_k_slices; bool benchmark; std::string tag; Options(): help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(true), measure_performance(false), iterations(20), save_workspace(false), alpha(1), beta(0), split_k_slices(8), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
// int const kAlignment = 8; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Updates input and filter sizes void update( cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size, cutlass::MatrixCoord stride) { this->input_size = input_size; this->filter_size = filter_size; conv_stride = stride; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("split-k-slices", split_k_slices); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "30_wgrad_split_k example\n\n" << " This example shows how to compute conv2d gradient with respect to weight (wgrad).\n" << " In wgrad, the K dimension of implicit GEMM, corresponding to the sequential reduction loop, is very large (N * P * Q).\n" << " Split-k with parallel reduction is highly effective for such cases.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --n=<int> Input tensor extent N\n" << " --h=<int> Input tensor extent H\n" << " --w=<int> Input tensor extent W\n" << " --c=<int> Input tensor extent C\n" << " --k=<int> Filter extent K\n" << " --r=<int> Filter extent R\n" << " --s=<int> Filter extent S\n\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --split-k-slices=<int> Split-k factor \n\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/30_wgrad_split_k/30_wgrad_split_k --n=32 --h=224 --w=224 --c=128 --k=256 --r=3 --s=3 --split-k-slices=8\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord(input_size.n(), (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << options.conv_stride.row() << "," << options.conv_stride.column() << "," << runtime_ms << "," << gflops; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile_convolution(Options const &options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. // // Inputs are the output gradient and the original activation. 
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.output_size()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.input_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.filter_size); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(7), ElementInputA(-8), 0); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(7), ElementInputB(-8), 0); // Fill tensor C, D on host with zeros cutlass::reference::host::TensorFill(tensor_c.host_view()); cutlass::reference::host::TensorFill(tensor_d.host_view()); // Fill tensor D for reference on host with zeros cutlass::reference::host::TensorFill(tensor_ref_d.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // // Define arguments for CUTLASS Convolution // cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Partition the GEMM K loop into split_k_slices chunks int split_k_slices = options.split_k_slices; // Construct Conv2dProblemSize with user defined output size // Do not forget to pass the last argument. cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices ); using cutlass::layout::TensorNHWC; cutlass::conv::SplitKMode const split_k_mode = cutlass::conv::SplitKMode::kParallel; // Since the epilogue is not computed after GEMM, there is no need to pass the C tensor and // alpha and beta can be set to 1 and 0 respectively. // Moreover, since the output will be written to the workspace, there is no need to pass // the D tensor as well. // Do not forget to pass the last argument. typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), {nullptr, TensorNHWC()}, {nullptr, TensorNHWC()}, {ElementCompute(1), ElementCompute(0)}, split_k_mode }; // // Initialize CUTLASS Convolution // ImplicitGemm implicit_gemm; size_t workspace_size = implicit_gemm.get_workspace_size(arguments); // Split-K requires non-zero workspace size. The workspace size grows linearly with split_k_slices. std::cout << "split-k-slices: " << split_k_slices << std::endl; std::cout << "workspace size: " << workspace_size << std::endl; // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm.can_implement(arguments); CUTLASS_CHECK(result.status); // After the workspace is allocated, we point the GEMM destination pointer to the workspace. 
TensorNHWC layout_D{TensorNHWC::packed(options.filter_size)}; arguments.ref_D.reset(reinterpret_cast<ElementCompute*>(workspace.get()), layout_D); result.status = implicit_gemm.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm(); CUTLASS_CHECK(result.status); if (split_k_mode == cutlass::conv::SplitKMode::kParallel) { // Do reduction ReductionDevice reduction_op; auto& status = result.status; static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemm::kConvolutionalOperator; typename ReductionDevice::Arguments reduction_args( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(), problem_size.split_k_slices, cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size), // Reduction input { reinterpret_cast<ElementAccumulator*> (workspace.get()), ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) }, // Destination { tensor_d.device_data(), ReductionStrideIndex(tensor_d.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) }, // Source { tensor_c.device_data(), ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) }, {options.alpha, options.beta} ); status = reduction_op.initialize(reduction_args, nullptr); status = reduction_op(); } // // Optional reference check // if (options.reference_check) { std::cout << "Verification on device...\n"; // Compute with reference implementation cutlass::reference::device::Conv2dWgrad< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator, cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue> >( problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_ref_d.device_ref(), options.alpha, options.beta ); // Check if output from CUTLASS kernel and reference kernel are equal or not tensor_c.sync_host(); tensor_d.sync_host(); tensor_ref_d.sync_host(); bool passed = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "30_wgrad_split_k_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." 
<< std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major >= 8)) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." 
<< std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {34, 408}; struct Benchmark { int h, w, c, k, r, s, stride_h, stride_w; } layers[] = { {56, 56, 64, 256, 1, 1, 1, 1}, {56, 56, 64, 64, 1, 1, 1, 1}, {56, 56, 64, 64, 3, 3, 1, 1}, {56, 56, 256, 64, 1, 1, 1, 1}, {56, 56, 256, 512, 1, 1, 2, 2}, {56, 56, 256, 128, 1, 1, 1, 1}, {56, 56, 128, 128, 3, 3, 2, 2}, {28, 28, 128, 512, 1, 1, 1, 1}, {28, 28, 512, 128, 1, 1, 1, 1}, {28, 28, 128, 128, 3, 3, 1, 1}, {28, 28, 512, 1024, 1, 1, 2, 2}, {28, 28, 512, 256, 1, 1, 1, 1}, {28, 28, 256, 256, 3, 3, 2, 2}, {14, 14, 256, 1024, 1, 1, 1, 1}, {14, 14, 1024, 256, 1, 1, 1, 1}, {14, 14, 256, 256, 3, 3, 1, 1}, {14, 14, 1024, 2048, 1, 1, 2, 2}, {14, 14, 1024, 512, 1, 1, 1, 1}, {14, 14, 512, 512, 3, 3, 2, 2}, { 7, 7, 512, 2048, 1, 1, 1, 1}, { 7, 7, 2048, 512, 1, 1, 1, 1}, { 7, 7, 512, 512, 3, 3, 1, 1}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const &layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}, {layer.stride_h, layer.stride_w}); Result result = profile_convolution(options); result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/30_wgrad_split_k/30_wgrad_split_k.cu/0
{ "file_path": "cutlass/examples/30_wgrad_split_k/30_wgrad_split_k.cu", "repo_id": "cutlass", "token_count": 10172 }
12
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class CustomMmaBase { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Policy describing tuning details using Policy = Policy_; // // Dependent types // /// Warp-level Mma using Operator = typename Policy::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. 
using WarpGemm = typename Policy::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount = GemmShape< Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>; /// Number of warp-level GEMM operations static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); /// Number of stages static int const kStages = Stages; // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM template <typename Element, typename OperandShape, typename OperandLayout> struct OperandSharedStorage { AlignedBuffer<Element, OperandShape::kCount> buffer; using TensorRef = TensorRef<Element, OperandLayout>; CUTLASS_DEVICE static OperandLayout Layout() { return OperandLayout::packed({OperandShape::kRow, OperandShape::kColumn}); } /// Returns a TensorRef to the operand CUTLASS_HOST_DEVICE TensorRef ref() { return TensorRef{buffer.data(), Layout()}; } }; /// Shape of the A matrix operand in shared memory using ShapeA = MatrixShape< Shape::kM + Policy::SmemPaddingA::kRow, Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape< Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; using SharedStorageA = OperandSharedStorage< typename Operator::ElementA, ShapeA, typename Operator::LayoutA>; using SharedStorageB = OperandSharedStorage< typename Operator::ElementB, ShapeB, typename Operator::LayoutB>; using TensorRefA = typename SharedStorageA::TensorRef; using TensorRefB = typename SharedStorageB::TensorRef; struct SharedStorage { /// Buffer for A operand SharedStorageA operand_A; /// Buffer for B operand SharedStorageB operand_B; }; protected: // // Data members // /// Iterator to load a warp-scoped tile of A operand from shared memory typename Operator::IteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of B operand from shared memory typename Operator::IteratorB warp_tile_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE CustomMmaBase( ///< Shared storage needed for internal use by threadblock-scoped GEMM SharedStorageA& shared_storageA, SharedStorageB& shared_storageB, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : warp_tile_iterator_A_(shared_storageA.ref(), lane_idx), warp_tile_iterator_B_(shared_storageB.ref(), lane_idx) {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
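// A minimal usage sketch of the shapes defined above. The concrete 128x128x32 threadblock
// tile and 64x64x32 warp tile below are illustrative assumptions chosen only to show the
// WarpCount arithmetic; they are not taken from this file.
#include "cutlass/gemm/gemm.h"

namespace example {

using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape        = cutlass::gemm::GemmShape<64, 64, 32>;

// Mirrors CustomMmaBase::WarpCount: one warp per 64x64 sub-tile, same K extent.
using WarpCount = cutlass::gemm::GemmShape<
    ThreadblockShape::kM / WarpShape::kM,   // 2
    ThreadblockShape::kN / WarpShape::kN,   // 2
    ThreadblockShape::kK / WarpShape::kK>;  // 1

static_assert(WarpCount::kM * WarpCount::kN * WarpCount::kK == 4,
              "A 128x128x32 CTA tile with 64x64x32 warp tiles is covered by four warps");

} // namespace example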
cutlass/examples/41_fused_multi_head_attention/gemm/custom_mma_base.h/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/gemm/custom_mma_base.h", "repo_id": "cutlass", "token_count": 1908 }
13
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# from typing import List import torch import subprocess import sys import tempfile import os import numpy as np TORCH_DTYPE_NAME = { torch.float32: "f32", torch.float16: "f16", torch.bfloat16: "b16" } NAME_TORCH_DTYPE = {v: k for k, v in TORCH_DTYPE_NAME.items()} def _tensor_from_storage(tensor: torch.Tensor, dtype) -> torch.Tensor: # PyTorch >= 2.0 if hasattr(tensor, 'untyped_storage'): return torch.tensor([], dtype=dtype).set_(tensor.untyped_storage()) return torch.tensor([], dtype=dtype).set_(tensor.storage().untyped()) class PipedSubprocess: def __init__(self, binary: str) -> None: self.binary = binary self.tempdir_ctx = tempfile.TemporaryDirectory() def __enter__(self) -> "PipedSubprocess": self.subp = subprocess.Popen(self.binary, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr, text=True, bufsize=0) self.tempdir = self.tempdir_ctx.__enter__() self.file_counter = 0 return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.tempdir_ctx.__exit__(exc_type, exc_val, exc_tb) def temp_filename(self, suffix: str) -> str: self.file_counter += 1 return os.path.join(self.tempdir, f"{self.file_counter}{suffix}") def write(self, *args) -> None: for a in args: self.subp.stdin.write(str(a) + " ") def writeTensor(self, tensor: torch.Tensor, name: str, stride_names: List[str]) -> None: print(f"Py ->C++: {TORCH_DTYPE_NAME[tensor.dtype]}:{name}") tensor_u8 = _tensor_from_storage(tensor, torch.uint8) self.write("tensor_begin", f"{TORCH_DTYPE_NAME[tensor.dtype]}:{name}", tensor_u8.shape[0]) filename = self.temp_filename(f"{name}.tensor") assert tensor.storage_offset() == 0 with open(filename, "wb+") as fd: fd.write(bytes(tensor_u8.numpy())) self.write("file", filename) self.write("tensor_end") for stride_name, 
stride_value in zip(stride_names, tensor.stride()): self.write(stride_name, stride_value) def readTensor(self, name, stride_name, shape) -> torch.Tensor: tmpfile = self.temp_filename(f"{name}.tensor") self.write("tmpfile", tmpfile) self.readExpect("tensor_begin") dtype_str, name = self.read().split(":") print(f"C++->Py : {dtype_str}:{name}") u8len = int(self.read()) dtype = NAME_TORCH_DTYPE[dtype_str] self.readExpect("file") self.readExpect(tmpfile) with open(tmpfile, "rb") as fd: data = fd.read(u8len) # `np.array` is not strictly needed, but avoids a torch warning tensor_u8 = torch.frombuffer(np.array(data), dtype=torch.uint8, count=u8len) self.readExpect("tensor_end") tensor = _tensor_from_storage(tensor_u8, dtype) strides = [] for sn in stride_name: self.readExpect(sn) strides.append(int(self.read())) if len(strides) != len(shape): strides.append(1) assert len(strides) == len(shape), name return torch.as_strided(tensor, shape, strides) def readNamed(self, name: str): self.readExpect(name) return self.read() def readExpect(self, what: str) -> None: r = self.read() if r != what: raise ValueError(f"Read {r} but expected {what}") def read(self): read_all = [] # Skip initial whitespace while True: r = self.subp.stdout.read(1) if r not in [' ', "\n"]: read_all.append(r) break # Read data while True: r = self.subp.stdout.read(1) if r in [' ', "\n"]: break read_all.append(r) return ''.join(read_all)
cutlass/examples/41_fused_multi_head_attention/piped_subprocess.py/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/piped_subprocess.py", "repo_id": "cutlass", "token_count": 2303 }
14
<jupyter_start><jupyter_text>Example of using epilogue visitor in the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs with different epilogues through CUTLASS Epilogue Visitor.[](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/04_epilogue_visitor.ipynb) Prerequisites for running on ColabThis notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass<jupyter_output><empty_output><jupyter_text>General setupWe first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import torch import cutlass from cutlass.epilogue import relu from cutlass import Tensor as FakeTensor from cutlass.utils.profiler import CUDAEventProfiler # This controls whether the C++ GEMM declaration will be printed at each step. Set to `false` to # omit this information. print_module = True # The Epilogue Visitor feature currently only works for SM80 and 90 from cutlass.backend.utils.device import device_cc if device_cc() not in [80, 90]: import sys sys.exit() m = 16384 n = m k = 512 type_A = torch.float16 type_B = torch.float16 type_C = torch.float16 type_D = torch.float16 torch.manual_seed(2023) scope_min = -4 scope_max = 4 tensor_A = torch.ceil(torch.empty(size=(m, k), dtype=type_A, device="cuda").uniform_(scope_min, scope_max)) tensor_B = torch.ceil(torch.empty(size=(k, n), dtype=type_B, device="cuda").uniform_(scope_min, scope_max)) tensor_C = torch.ceil(torch.empty(size=(m, n), dtype=type_C, device="cuda").uniform_(scope_min, scope_max)) tensor_D = torch.zeros_like(tensor_C) plan = cutlass.op.Gemm(element=torch.float16, layout=cutlass.LayoutType.RowMajor, element_accumulator=torch.float32)<jupyter_output><empty_output><jupyter_text>Define the epilogue visitor functorThe epilogue functor can be defined as a simple Python function and a set of example tensors for inputs and outputs. The example below illustrates a complex epilogue under the directed acyclic graph structure (`F` is used twice). The epilogue takes source tensors in different ranks: `alpha`, `beta` are scalars, `bias` is a column vector to broadcast, and `C`, `aux` are matrices. It contains various math operations from basic arithmetic operations and built-in callable functions like `relu`. It also accommodates multiple outputs `D` and `F`. Note that there are some restrictions on syntax.* Each named variable must be assigned exactly once and defined before it is used.* Reserved names: `accum`, `C`, and `D` are reserved for accumulator, tensor_C, and tensor_D.* Return values must be a named variable.The example tensors are provided as a dictionary with tensor names as keys and reference tensors as values. The reference tensors can be `float`, `torch.Tensor`, `numpy.ndarray`, or our `FakeTensor`.
They provide the shape and data type information of the inputs and outputs of the epilogue. The epilogue can be generated simply through `cutlass.epilogue.trace(<epilogue function>, <example tensors>)`.<jupyter_code># Define epilogue visitor def example_epilogue(accum, alpha, C, beta, aux, bias): F = alpha * accum + (beta * C + aux) E = relu(F + 1) + bias D = E + F return D, F # Construct inputs and outputs alpha = 0.5 beta = 0.5 aux = torch.ceil(torch.empty(size=(m, n), dtype=type_C, device="cuda").uniform_(scope_min, scope_max)) bias = torch.ceil(torch.empty(size=(m, 1), dtype=type_C, device="cuda").uniform_(scope_min, scope_max)) tensor_F = torch.zeros_like(tensor_D) examples_tensors = { "accum": FakeTensor(element=torch.float32, shape=(m, n), layout_tag=cutlass.LayoutType.RowMajor), "alpha": alpha, "C": tensor_C, "beta": beta, "aux": aux, "bias": bias, "D": tensor_D, "F": tensor_F } # Trace the epilogue visitor epilogue_visitor = cutlass.epilogue.trace(example_epilogue, examples_tensors)<jupyter_output><empty_output><jupyter_text>Run a GEMM with the epilogue visitor functorThe `epilogue_visitor` can be used by setting the plan's `epilogue_visitor` field. The arguments for the epilogue visitor are provided as a `dict` through the `visitor_args` keyword argument.<jupyter_code>visitor_args = { "alpha": alpha, "C": tensor_C, "beta": beta, "aux": aux, "bias": bias, "D": tensor_D, "F": tensor_F } plan.epilogue_visitor = epilogue_visitor plan.run( tensor_A, tensor_B, tensor_C, tensor_D, visitor_args=visitor_args, print_module=print_module)<jupyter_output><empty_output><jupyter_text>The epilogue function `example_epilogue` can be used as a reference function. We can now verify the results simply with<jupyter_code>class TorchReference(torch.nn.Module): def forward(self, A, B, alpha, C, beta, aux, bias): accum = torch.matmul(A, B) return example_epilogue(accum, alpha, C, beta, aux, bias) torch_reference = TorchReference() tensor_D_ref, tensor_F_ref = torch_reference(tensor_A, tensor_B, alpha, tensor_C, beta, aux, bias) assert torch.equal(tensor_D, tensor_D_ref) assert torch.equal(tensor_F, tensor_F_ref)<jupyter_output><empty_output><jupyter_text>The performance of the CUTLASS fused kernel can be profiled with<jupyter_code>warmup_iterations = 10 profile_iterations = 50 # Profile CUTLASS fused kernel duration = CUDAEventProfiler( plan, warmup_iterations, profile_iterations, tensor_A, tensor_B, tensor_C, tensor_D, visitor_args=visitor_args)() print(f"CUTLASS duration: {duration:.2f} ms")<jupyter_output><empty_output>
cutlass/examples/python/04_epilogue_visitor.ipynb/0
{ "file_path": "cutlass/examples/python/04_epilogue_visitor.ipynb", "repo_id": "cutlass", "token_count": 1999 }
15
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/arch/copy.hpp> #include <cute/tensor.hpp> namespace cute { /** * concept Copy_Traits * { * using ThrID = // Logical thread id (tid) -> tidx * * using SrcLayout = // (Logical src thread id (tid), Logical src value id (vid)) -> bit * using DstLayout = // (Logical dst thread id (tid), Logical dst value id (vid)) -> bit * using RefLayout = // (Logical ref thread id (tid), Logical ref value id (vid)) -> bit * }; * * The abstract bit ordering of the Copy_Traits (the codomain of SrcLayout, DstLayout, and RefLayout) * is arbitrary and only used to construct maps * (ref-tid,ref-vid) -> (src-tid,src-vid) * (ref-tid,ref-vid) -> (dst-tid,dst-vid) * in TiledCopy. The Layout_TV in TiledCopy is in accordance with the RefLayout of a Traits, then mapped to * the Src or Dst (tid,vid) representation on demand. * */ template <class CopyOperation, class... 
CopyOpArgs> struct Copy_Traits { static_assert(dependent_false<CopyOperation>, "Copy_Traits not implemented for this CopyOperation."); }; template <class S, class D> struct Copy_Traits<UniversalCopy<S,D>> { // Logical thread id to thread idx (one-thread) using ThrID = Layout<_1>; // Map from (src-thr,src-val) to bit using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>; // Map from (dst-thr,dst-val) to bit using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>; // Reference map from (thr,val) to bit using RefLayout = SrcLayout; }; template <int MaxVecBits> struct Copy_Traits<AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>> { // Logical thread id to thread idx (one-thread) using ThrID = Layout<_1>; // Map from (src-thr,src-val) to bit using SrcLayout = Layout<Shape<_1,_1>, Stride<_0,_0>>; // Map from (dst-thr,dst-val) to bit using DstLayout = Layout<Shape<_1,_1>, Stride<_0,_0>>; // Reference map from (thr,val) to bit using RefLayout = SrcLayout; }; // // Generic copy_unpack for common argument-based Copy_Traits // template <class CopyOp, class... Args, class SEngine, class SLayout, class DEngine, class DLayout> CUTE_HOST_DEVICE constexpr void copy_unpack(Copy_Traits<CopyOp,Args...> const&, Tensor<SEngine,SLayout> const& src, Tensor<DEngine,DLayout> & dst) { // Specializations can generalize on these checks //static_assert(is_smem<TS>::value, "Expected smem for this Copy_Traits<CopyOp>"); //static_assert(is_rmem<TD>::value, "Expected rmem for this Copy_Traits<CopyOp>"); using RegistersSrc = typename CopyOp::SRegisters; using RegistersDst = typename CopyOp::DRegisters; using RegTypeSrc = typename remove_extent<RegistersSrc>::type; using RegTypeDst = typename remove_extent<RegistersDst>::type; constexpr int RegNumSrc = extent<RegistersSrc>::value; constexpr int RegNumDst = extent<RegistersDst>::value; Tensor rS = recast<RegTypeSrc>(src); Tensor rD = recast<RegTypeDst>(dst); CUTE_STATIC_ASSERT_V(size(rS) == Int<RegNumSrc>{}, "Copy_Traits: src failed to vectorize into registers. Layout is incompatible with this CopyOp."); CUTE_STATIC_ASSERT_V(size(rD) == Int<RegNumDst>{}, "Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp."); detail::explode(detail::CallCOPY<CopyOp>{}, rS, make_int_sequence<RegNumSrc>{}, rD, make_int_sequence<RegNumDst>{}); } // // Accept mutable temporaries // template <class CopyOp, class... Args, class SEngine, class SLayout, class DEngine, class DLayout> CUTE_HOST_DEVICE constexpr void copy_unpack(Copy_Traits<CopyOp,Args...> const& traits, Tensor<SEngine,SLayout> const& src, Tensor<DEngine,DLayout> && dst) { copy_unpack(traits, src, dst); } } // end namespace cute
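// A minimal usage sketch (illustrative only): a Copy_Atom built from the UniversalCopy
// traits above, applied to two small stack-backed tensors. The element type (float) and
// extent (8) are arbitrary assumptions for the example; mode-0 of each tensor is sized to
// the atom's one-value granularity so the atom can be dispatched directly.
#include <cute/tensor.hpp>
#include <cute/atom/copy_atom.hpp>

CUTE_HOST_DEVICE void copy_traits_example()
{
  using namespace cute;

  float src_buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  float dst_buf[8] = {};

  Tensor src = make_tensor(&src_buf[0], make_shape(Int<1>{}, Int<8>{}));
  Tensor dst = make_tensor(&dst_buf[0], make_shape(Int<1>{}, Int<8>{}));

  // Copy_Traits<UniversalCopy<float,float>> describes a one-thread, one-value copy;
  // cute::copy loops over mode-1 and funnels each element through copy_unpack above.
  Copy_Atom<UniversalCopy<float, float>, float> atom{};
  copy(atom, src, dst);
}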
cutlass/include/cute/atom/copy_traits.hpp/0
{ "file_path": "cutlass/include/cute/atom/copy_traits.hpp", "repo_id": "cutlass", "token_count": 1984 }
16
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #if defined(__CUDACC__) || defined(_NVHPC_CUDA) # define CUTE_HOST_DEVICE __forceinline__ __host__ __device__ # define CUTE_DEVICE __forceinline__ __device__ # define CUTE_HOST __forceinline__ __host__ #else # define CUTE_HOST_DEVICE inline # define CUTE_DEVICE inline # define CUTE_HOST inline #endif // CUTE_HOST_DEVICE, CUTE_DEVICE #if defined(__CUDACC_RTC__) # define CUTE_HOST_RTC CUTE_HOST_DEVICE #else # define CUTE_HOST_RTC CUTE_HOST #endif #if !defined(__CUDACC_RTC__) && !defined(__clang__) && \ (defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)) # define CUTE_UNROLL #pragma unroll # define CUTE_NO_UNROLL #pragma unroll 1 #elif defined(__CUDACC_RTC__) || defined(__clang__) # define CUTE_UNROLL _Pragma("unroll") # define CUTE_NO_UNROLL _Pragma("unroll 1") #else # define CUTE_UNROLL # define CUTE_NO_UNROLL #endif // CUTE_UNROLL #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) # define CUTE_INLINE_CONSTANT static const __device__ #else # define CUTE_INLINE_CONSTANT static constexpr #endif // __grid_constant__ was introduced in CUDA 11.7. #if ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 7))) # define CUTE_GRID_CONSTANT_SUPPORTED #endif // __grid_constant__ can be enabled only on SM70+. #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)) # define CUTE_GRID_CONSTANT_ENABLED #endif #if ! defined(CUTE_GRID_CONSTANT) # if defined(CUTE_GRID_CONSTANT_SUPPORTED) && defined(CUTE_GRID_CONSTANT_ENABLED) # define CUTE_GRID_CONSTANT __grid_constant__ # else # define CUTE_GRID_CONSTANT # endif #endif // Some versions of GCC < 11 have trouble deducing that a // function with "auto" return type and all of its returns in an "if // constexpr ... else" statement must actually return. 
Thus, GCC // emits spurious "missing return statement" build warnings. // Developers can suppress these warnings by using the // CUTE_GCC_UNREACHABLE macro, which must be followed by a semicolon. // It's harmless to use the macro for other GCC versions or other // compilers, but it has no effect. #if ! defined(CUTE_GCC_UNREACHABLE) # if defined(__GNUC__) # define CUTE_GCC_UNREACHABLE __builtin_unreachable() # else # define CUTE_GCC_UNREACHABLE # endif #endif #if defined(_MSC_VER) // Provides support for alternative operators 'and', 'or', and 'not' # include <iso646.h> #endif // _MSC_VER #if defined(__CUDACC_RTC__) # define CUTE_STL_NAMESPACE cuda::std # define CUTE_STL_NAMESPACE_IS_CUDA_STD #else # define CUTE_STL_NAMESPACE std #endif // // Assertion helpers // #if defined(__CUDACC_RTC__) # include <cuda/std/cassert> #else # include <cassert> #endif #define CUTE_STATIC_V(x) decltype(x)::value #define CUTE_STATIC_ASSERT static_assert #define CUTE_STATIC_ASSERT_V(x,...) static_assert(decltype(x)::value, ##__VA_ARGS__) // Fail and print a message. Typically used for notification of a compiler misconfiguration. #if defined(__CUDA_ARCH__) # define CUTE_INVALID_CONTROL_PATH(x) assert(0 && x); printf(x); __brkpt() #else # define CUTE_INVALID_CONTROL_PATH(x) assert(0 && x); printf(x) #endif // // IO // #if !defined(__CUDACC_RTC__) # include <cstdio> # include <iostream> # include <iomanip> #endif // // Support // #include <cute/util/type_traits.hpp> // // Basic types // #include <cute/numeric/numeric_types.hpp> // // Debugging utilities // #include <cute/util/print.hpp> #include <cute/util/debug.hpp>
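// A minimal usage sketch (illustrative only): how the portability macros above are typically
// used. The function, its name, and the fixed array length are assumptions for the example.
#include <cute/config.hpp>

template <int N>
CUTE_HOST_DEVICE
float
sum_array(float const (&x)[N])
{
  CUTE_STATIC_ASSERT(N > 0, "N must be positive");

  float acc = 0.0f;
  CUTE_UNROLL                    // expands to '#pragma unroll' (or _Pragma) where supported
  for (int i = 0; i < N; ++i) {
    acc += x[i];
  }
  return acc;
}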
cutlass/include/cute/config.hpp/0
{ "file_path": "cutlass/include/cute/config.hpp", "repo_id": "cutlass", "token_count": 1930 }
17
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cute/util/print.hpp" #include "cute/util/type_traits.hpp" #include "cute/numeric/math.hpp" namespace cute { // A constant value: short name and type-deduction for fast compilation template <auto v> struct C { using type = C<v>; static constexpr auto value = v; using value_type = decltype(v); CUTE_HOST_DEVICE constexpr operator value_type() const noexcept { return value; } CUTE_HOST_DEVICE constexpr value_type operator()() const noexcept { return value; } }; // Deprecate template <class T, T v> using constant = C<v>; template <bool b> using bool_constant = C<b>; using true_type = bool_constant<true>; using false_type = bool_constant<false>; // A more std:: conforming integral_constant that enforces type but interops with C<v> template <class T, T v> struct integral_constant : C<v> { using type = integral_constant<T,v>; static constexpr T value = v; using value_type = T; // Disambiguate C<v>::operator value_type() //CUTE_HOST_DEVICE constexpr operator value_type() const noexcept { return value; } CUTE_HOST_DEVICE constexpr value_type operator()() const noexcept { return value; } }; // // Traits // // Use cute::is_std_integral<T> to match built-in integral types (int, int64_t, unsigned, etc) // Use cute::is_integral<T> to match both built-in integral types AND static integral types. 
template <class T> struct is_integral : bool_constant<is_std_integral<T>::value> {}; template <auto v> struct is_integral<C<v> > : true_type {}; template <class T, T v> struct is_integral<integral_constant<T,v>> : true_type {}; // is_static detects if an (abstract) value is defined completely by it's type (no members) template <class T> struct is_static : bool_constant<is_empty<remove_cvref_t<T>>::value> {}; template <class T> constexpr bool is_static_v = is_static<T>::value; // is_constant detects if a type is a static integral type and if v is equal to a value template <auto n, class T> struct is_constant : false_type {}; template <auto n, class T> struct is_constant<n, T const > : is_constant<n,T> {}; template <auto n, class T> struct is_constant<n, T const&> : is_constant<n,T> {}; template <auto n, class T> struct is_constant<n, T &> : is_constant<n,T> {}; template <auto n, class T> struct is_constant<n, T &&> : is_constant<n,T> {}; template <auto n, auto v> struct is_constant<n, C<v> > : bool_constant<v == n> {}; template <auto n, class T, T v> struct is_constant<n, integral_constant<T,v>> : bool_constant<v == n> {}; // // Specializations // template <int v> using Int = C<v>; using _m32 = Int<-32>; using _m24 = Int<-24>; using _m16 = Int<-16>; using _m12 = Int<-12>; using _m10 = Int<-10>; using _m9 = Int<-9>; using _m8 = Int<-8>; using _m7 = Int<-7>; using _m6 = Int<-6>; using _m5 = Int<-5>; using _m4 = Int<-4>; using _m3 = Int<-3>; using _m2 = Int<-2>; using _m1 = Int<-1>; using _0 = Int<0>; using _1 = Int<1>; using _2 = Int<2>; using _3 = Int<3>; using _4 = Int<4>; using _5 = Int<5>; using _6 = Int<6>; using _7 = Int<7>; using _8 = Int<8>; using _9 = Int<9>; using _10 = Int<10>; using _12 = Int<12>; using _16 = Int<16>; using _24 = Int<24>; using _32 = Int<32>; using _64 = Int<64>; using _96 = Int<96>; using _128 = Int<128>; using _192 = Int<192>; using _256 = Int<256>; using _384 = Int<384>; using _512 = Int<512>; using _768 = Int<768>; using _1024 = Int<1024>; using _2048 = Int<2048>; using _4096 = Int<4096>; using _8192 = Int<8192>; using _16384 = Int<16384>; using _32768 = Int<32768>; using _65536 = Int<65536>; using _131072 = Int<131072>; using _262144 = Int<262144>; using _524288 = Int<524288>; /***************/ /** Operators **/ /***************/ #define CUTE_LEFT_UNARY_OP(OP) \ template <auto t> \ CUTE_HOST_DEVICE constexpr \ C<(OP t)> operator OP (C<t>) { \ return {}; \ } #define CUTE_RIGHT_UNARY_OP(OP) \ template <auto t> \ CUTE_HOST_DEVICE constexpr \ C<(t OP)> operator OP (C<t>) { \ return {}; \ } #define CUTE_BINARY_OP(OP) \ template <auto t, auto u> \ CUTE_HOST_DEVICE constexpr \ C<(t OP u)> operator OP (C<t>, C<u>) { \ return {}; \ } CUTE_LEFT_UNARY_OP(+); CUTE_LEFT_UNARY_OP(-); CUTE_LEFT_UNARY_OP(~); CUTE_LEFT_UNARY_OP(!); CUTE_LEFT_UNARY_OP(*); CUTE_BINARY_OP( +); CUTE_BINARY_OP( -); CUTE_BINARY_OP( *); CUTE_BINARY_OP( /); CUTE_BINARY_OP( %); CUTE_BINARY_OP( &); CUTE_BINARY_OP( |); CUTE_BINARY_OP( ^); CUTE_BINARY_OP(<<); CUTE_BINARY_OP(>>); CUTE_BINARY_OP(&&); CUTE_BINARY_OP(||); CUTE_BINARY_OP(==); CUTE_BINARY_OP(!=); CUTE_BINARY_OP( >); CUTE_BINARY_OP( <); CUTE_BINARY_OP(>=); CUTE_BINARY_OP(<=); #undef CUTE_BINARY_OP #undef CUTE_LEFT_UNARY_OP #undef CUTE_RIGHT_UNARY_OP // // Mixed static-dynamic special cases // template <auto t, class U, __CUTE_REQUIRES(is_std_integral<U>::value && t == 0)> CUTE_HOST_DEVICE constexpr C<0> operator*(C<t>, U) { return {}; } template <class U, auto t, __CUTE_REQUIRES(is_std_integral<U>::value && t == 0)> CUTE_HOST_DEVICE constexpr 
C<0> operator*(U, C<t>) { return {}; } template <auto t, class U, __CUTE_REQUIRES(is_std_integral<U>::value && t == 0)> CUTE_HOST_DEVICE constexpr C<0> operator/(C<t>, U) { return {}; } template <class U, auto t, __CUTE_REQUIRES(is_std_integral<U>::value && (t == 1 || t == -1))> CUTE_HOST_DEVICE constexpr C<0> operator%(U, C<t>) { return {}; } template <auto t, class U, __CUTE_REQUIRES(is_std_integral<U>::value && t == 0)> CUTE_HOST_DEVICE constexpr C<0> operator%(C<t>, U) { return {}; } template <auto t, class U, __CUTE_REQUIRES(is_std_integral<U>::value && t == 0)> CUTE_HOST_DEVICE constexpr C<0> operator&(C<t>, U) { return {}; } template <class U, auto t, __CUTE_REQUIRES(is_std_integral<U>::value && t == 0)> CUTE_HOST_DEVICE constexpr C<0> operator&(U, C<t>) { return {}; } template <auto t, class U, __CUTE_REQUIRES(is_std_integral<U>::value && !bool(t))> CUTE_HOST_DEVICE constexpr C<false> operator&&(C<t>, U) { return {}; } template <auto t, class U, __CUTE_REQUIRES(is_std_integral<U>::value && !bool(t))> CUTE_HOST_DEVICE constexpr C<false> operator&&(U, C<t>) { return {}; } template <class U, auto t, __CUTE_REQUIRES(is_std_integral<U>::value && bool(t))> CUTE_HOST_DEVICE constexpr C<true> operator||(C<t>, U) { return {}; } template <class U, auto t, __CUTE_REQUIRES(is_std_integral<U>::value && bool(t))> CUTE_HOST_DEVICE constexpr C<true> operator||(U, C<t>) { return {}; } // // Named functions from math.hpp // #define CUTE_NAMED_UNARY_FN(OP) \ template <auto t> \ CUTE_HOST_DEVICE constexpr \ C<OP(t)> OP (C<t>) { \ return {}; \ } #define CUTE_NAMED_BINARY_FN(OP) \ template <auto t, auto u> \ CUTE_HOST_DEVICE constexpr \ C<OP(t,u)> OP (C<t>, C<u>) { \ return {}; \ } \ template <auto t, class U, \ __CUTE_REQUIRES(is_std_integral<U>::value)> \ CUTE_HOST_DEVICE constexpr \ auto OP (C<t>, U u) { \ return OP(t,u); \ } \ template <class T, auto u, \ __CUTE_REQUIRES(is_std_integral<T>::value)> \ CUTE_HOST_DEVICE constexpr \ auto OP (T t, C<u>) { \ return OP(t,u); \ } CUTE_NAMED_UNARY_FN(abs); CUTE_NAMED_UNARY_FN(signum); CUTE_NAMED_UNARY_FN(has_single_bit); CUTE_NAMED_BINARY_FN(max); CUTE_NAMED_BINARY_FN(min); CUTE_NAMED_BINARY_FN(shiftl); CUTE_NAMED_BINARY_FN(shiftr); CUTE_NAMED_BINARY_FN(gcd); CUTE_NAMED_BINARY_FN(lcm); #undef CUTE_NAMED_UNARY_FN #undef CUTE_NAMED_BINARY_FN // // Other functions // template <auto t, auto u> CUTE_HOST_DEVICE constexpr C<t / u> safe_div(C<t>, C<u>) { static_assert(t % u == 0, "Static safe_div requires t % u == 0"); return {}; } template <auto t, class U, __CUTE_REQUIRES(is_std_integral<U>::value)> CUTE_HOST_DEVICE constexpr auto safe_div(C<t>, U u) { return t / u; } template <class T, auto u, __CUTE_REQUIRES(is_std_integral<T>::value)> CUTE_HOST_DEVICE constexpr auto safe_div(T t, C<u>) { return t / u; } template <class TrueType, class FalseType> CUTE_HOST_DEVICE constexpr decltype(auto) conditional_return(true_type, TrueType&& t, FalseType&&) { return static_cast<TrueType&&>(t); } template <class TrueType, class FalseType> CUTE_HOST_DEVICE constexpr decltype(auto) conditional_return(false_type, TrueType&&, FalseType&& f) { return static_cast<FalseType&&>(f); } // TrueType and FalseType must have a common type template <class TrueType, class FalseType> CUTE_HOST_DEVICE constexpr auto conditional_return(bool b, TrueType const& t, FalseType const& f) { return b ? 
t : f; } // TrueType and FalseType don't require a common type template <bool b, class TrueType, class FalseType> CUTE_HOST_DEVICE constexpr auto conditional_return(TrueType const& t, FalseType const& f) { if constexpr (b) { return t; } else { return f; } } template <class Trait> CUTE_HOST_DEVICE constexpr auto static_value() { if constexpr (is_std_integral<decltype(Trait::value)>::value) { return Int<Trait::value>{}; } else { return Trait::value; } CUTE_GCC_UNREACHABLE; } // // Display utilities // template <auto Value> CUTE_HOST_DEVICE void print(C<Value>) { printf("_"); ::cute::print(Value); } #if !defined(__CUDACC_RTC__) template <auto t> CUTE_HOST std::ostream& operator<<(std::ostream& os, C<t> const&) { return os << "_" << t; } #endif namespace detail { // parse_int_digits takes a variadic number of digits and converts them into an int template <class... Ts> constexpr uint64_t parse_int_digits(uint64_t result, int digit, Ts... digits) { if constexpr (sizeof...(Ts) == 0) { return 10 * result + digit; } else { return parse_int_digits(10 * result + digit, digits...); } } } // end namespace detail // This user-defined literal operator allows cute::constant written as literals. For example, // // auto var = 32_c; // // var has type cute::constant<int,32>. // template <char... digits> constexpr cute::constant<int,detail::parse_int_digits(0, (digits - '0')...)> operator "" _c() { static_assert((('0' <= digits && digits <= '9') && ...), "Expected 0 <= digit <= 9 for each digit of the integer."); return {}; } } // end namespace cute
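// A minimal usage sketch (illustrative only): compile-time integers from this header fold
// arithmetic into the type system. The function name and the values 8, 4, and 32 are
// assumptions chosen for the example.
#include <cute/numeric/integral_constant.hpp>

CUTE_HOST_DEVICE constexpr int static_int_example(int runtime_n)
{
  using namespace cute;

  auto tile = Int<8>{} * _4{};   // C<32>: the product is carried in the type, not in storage
  static_assert(decltype(tile)::value == decltype(32_c)::value,
                "Int<8>{} * _4{} and the literal 32_c denote the same constant");

  // Mixed static-dynamic special case defined above: multiplying by _0 stays static.
  static_assert(is_static<decltype(_0{} * runtime_n)>::value,
                "_0{} * <runtime int> is still the static constant C<0>");

  return tile + runtime_n;       // C<32> converts to int for ordinary runtime arithmetic
}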
cutlass/include/cute/numeric/integral_constant.hpp/0
{ "file_path": "cutlass/include/cute/numeric/integral_constant.hpp", "repo_id": "cutlass", "token_count": 6699 }
18
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/util/type_traits.hpp> // // CUDA compatible print and printf // namespace cute { CUTE_HOST_DEVICE int num_digits(int x) { return (x < 10 ? 1 : (x < 100 ? 2 : (x < 1000 ? 3 : (x < 10000 ? 4 : (x < 100000 ? 5 : (x < 1000000 ? 6 : (x < 10000000 ? 7 : (x < 100000000 ? 8 : (x < 1000000000 ? 9 : 10))))))))); } // // print dispatcher // CUTE_HOST_DEVICE void print(char c) { printf("%c", c); } CUTE_HOST_DEVICE void print(signed char a) { printf("%d", static_cast<int>(a)); } CUTE_HOST_DEVICE void print(unsigned char a) { printf("%u", static_cast<unsigned int>(a)); } CUTE_HOST_DEVICE void print(short a) { printf("%hd", a); } CUTE_HOST_DEVICE void print(unsigned short a) { printf("%hu", a); } CUTE_HOST_DEVICE void print(int a) { printf("%d", a); } CUTE_HOST_DEVICE void print(unsigned int a) { printf("%u", a); } CUTE_HOST_DEVICE void print(long a) { printf("%ld", a); } CUTE_HOST_DEVICE void print(unsigned long a) { printf("%lu", a); } CUTE_HOST_DEVICE void print(long long a) { printf("%lld", a); } CUTE_HOST_DEVICE void print(unsigned long long a) { printf("%llu", a); } CUTE_HOST_DEVICE void print(float a) { printf("%f", a); } CUTE_HOST_DEVICE void print(double a) { printf("%f", a); } template <class... T> CUTE_HOST_DEVICE void print(char const* format, T const&... 
t) { printf(format, t...); } CUTE_HOST_DEVICE void print(char const* format) { printf("%s", format); } // // pretty printing // template <class T> CUTE_HOST_DEVICE void pretty_print(T const& v) { printf(" "); print(v); } CUTE_HOST_DEVICE void pretty_print(bool const& v) { printf("%*d", 3, int(v)); } CUTE_HOST_DEVICE void pretty_print(int32_t const& v) { printf("%*d", 5, v); } CUTE_HOST_DEVICE void pretty_print(uint32_t const& v) { printf("%*d", 5, v); } CUTE_HOST_DEVICE void pretty_print(int64_t const& v) { printf("%*lld", 5, static_cast<long long>(v)); } CUTE_HOST_DEVICE void pretty_print(uint64_t const& v) { printf("%*llu", 5, static_cast<unsigned long long>(v)); } CUTE_HOST_DEVICE void pretty_print(half_t const& v) { printf("%*.2f", 8, float(v)); } CUTE_HOST_DEVICE void pretty_print(float const& v) { printf("%*.2e", 10, v); } CUTE_HOST_DEVICE void pretty_print(double const& v) { printf("%*.3e", 11, v); } } // end namespace cute
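// A minimal usage sketch (illustrative only): cute::print is callable from both host and
// device code and overloads on the argument type. The values printed are placeholders.
#include <cute/util/print.hpp>

CUTE_HOST_DEVICE void print_example()
{
  cute::print("M = ");  cute::print(128);  cute::print("\n");
  cute::print("alpha = %f, beta = %f\n", 1.0, 0.0);  // printf-style forwarding overload
  cute::pretty_print(3.14f);                         // fixed-width form used for table dumps
}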
cutlass/include/cute/util/print.hpp/0
{ "file_path": "cutlass/include/cute/util/print.hpp", "repo_id": "cutlass", "token_count": 1621 }
19
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Matrix multiply-accumulate specialized for SM89 */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" //////////////////////////////////////////////////////////////////////////////// #if (__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 4) # define CUTLASS_ARCH_MMA_SM89_SUPPORTED 1 #endif #if defined(CUTLASS_ARCH_MMA_SM89_SUPPORTED) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 890) # define CUTLASS_ARCH_MMA_SM89_ENABLED #endif //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { //////////////////////////////////////////////////////////////////////////////// namespace detail { // Whether the Mma uses an SM89 staged accumulation policy template <class Operator> static constexpr bool is_sm89_staged_policy_v = ( // ElementA must be FP8 platform::is_same<typename Operator::ElementA, cutlass::float_e4m3_t>::value || platform::is_same<typename Operator::ElementA, cutlass::float_e5m2_t>::value ) && ( // ElementB must be FP8 platform::is_same<typename Operator::ElementB, cutlass::float_e4m3_t>::value || platform::is_same<typename Operator::ElementB, cutlass::float_e5m2_t>::value ) && ( // The instruction shape must be 16x8x32 Operator::ArchMmaOperator::Shape::kM == 16 && Operator::ArchMmaOperator::Shape::kN == 8 && Operator::ArchMmaOperator::Shape::kK == 32 ) && ( // The operator must be OpMultiplyAdd (default) platform::is_same<typename Operator::MathOperator, OpMultiplyAdd>::value ); } // namespace detail //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // // Matrix Multiply 16832 - Float {E4M3, E5M2}, FP32 accumulation // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation - F32 = fe4m3 * fe4m3 + F32 template <typename Operator_> struct Mma< gemm::GemmShape<16, 8, 32>, 32, cutlass::float_e4m3_t, layout::RowMajor, cutlass::float_e4m3_t, layout::ColumnMajor, float, layout::RowMajor, Operator_> { static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value || platform::is_same<Operator_, OpMultiplyAddFastAccum>::value, "Invalid operator for SM89 FP8 instruction"); using Shape = gemm::GemmShape<16, 8, 32>; using ElementA = cutlass::float_e4m3_t; using LayoutA = layout::RowMajor; using FragmentA = Array<ElementA, 16>; using ElementB = cutlass::float_e4m3_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<ElementB, 8>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 4>; using Operator = Operator_; using ArchTag = arch::Sm89; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c) const { #if defined(CUTLASS_ARCH_MMA_SM89_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm( "mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 " "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]) ); #else
CUTLASS_UNUSED(d); CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation - F32 = fe4m3 * fe5m2 + F32 template <typename Operator_> struct Mma< gemm::GemmShape<16, 8, 32>, 32, cutlass::float_e4m3_t, layout::RowMajor, cutlass::float_e5m2_t, layout::ColumnMajor, float, layout::RowMajor, Operator_> { static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value || platform::is_same<Operator_, OpMultiplyAddFastAccum>::value, "Invalid operator for SM89 FP8 instruction"); using Shape = gemm::GemmShape<16, 8, 32>; using ElementA = cutlass::float_e4m3_t; using LayoutA = layout::RowMajor; using FragmentA = Array<ElementA, 16>; using ElementB = cutlass::float_e5m2_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<ElementB, 8>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 4>; using Operator = Operator_; using ArchTag = arch::Sm89; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c) const { #if defined(CUTLASS_ARCH_MMA_SM89_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm( "mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e5m2.f32 " "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]) ); #else CUTLASS_UNUSED(d); CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation - F32 = fe5m2 * fe4m3 + F32 template <typename Operator_> struct Mma< gemm::GemmShape<16, 8, 32>, 32, cutlass::float_e5m2_t, layout::RowMajor, cutlass::float_e4m3_t, layout::ColumnMajor, float, layout::RowMajor, Operator_> { static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value || platform::is_same<Operator_, OpMultiplyAddFastAccum>::value, "Invalid operator for SM89 FP8 instruction"); using Shape = gemm::GemmShape<16, 8, 32>; using ElementA = cutlass::float_e5m2_t; using LayoutA = layout::RowMajor; using FragmentA = Array<ElementA, 16>; using ElementB = cutlass::float_e4m3_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<ElementB, 8>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 4>; using Operator = Operator_; using ArchTag = arch::Sm89; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c) const { #if defined(CUTLASS_ARCH_MMA_SM89_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm( "mma.sync.aligned.m16n8k32.row.col.f32.e5m2.e4m3.f32 " "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]) ); #else CUTLASS_UNUSED(d); CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation - F32 = fe5m2 * fe5m2 + F32 template <typename Operator_> struct Mma< gemm::GemmShape<16, 8, 32>, 32, cutlass::float_e5m2_t, 
layout::RowMajor, cutlass::float_e5m2_t, layout::ColumnMajor, float, layout::RowMajor, Operator_> { static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value || platform::is_same<Operator_, OpMultiplyAddFastAccum>::value, "Invalid operator for SM89 FP8 instruction"); using Shape = gemm::GemmShape<16, 8, 32>; using ElementA = cutlass::float_e5m2_t; using LayoutA = layout::RowMajor; using FragmentA = Array<ElementA, 16>; using ElementB = cutlass::float_e5m2_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<ElementB, 8>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 4>; using Operator = Operator_; using ArchTag = arch::Sm89; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c) const { #if defined(CUTLASS_ARCH_MMA_SM89_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm( "mma.sync.aligned.m16n8k32.row.col.f32.e5m2.e5m2.f32 " "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]) ); #else CUTLASS_UNUSED(d); CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_NOT_IMPLEMENTED(); #endif } }; } // namespace arch } // namespace cutlass
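// A minimal usage sketch (illustrative only): invoking the 16x8x32 FP8 MMA atom defined
// above directly from device code. The fragments are left uninitialized here; in a real
// kernel they are filled by warp-level iterators, and the instruction requires sm_89.
#include "cutlass/arch/mma_sm89.h"
#include "cutlass/gemm/gemm.h"

__device__ void mma_fp8_example()
{
  using Mma = cutlass::arch::Mma<
      cutlass::gemm::GemmShape<16, 8, 32>, 32,
      cutlass::float_e4m3_t, cutlass::layout::RowMajor,
      cutlass::float_e4m3_t, cutlass::layout::ColumnMajor,
      float, cutlass::layout::RowMajor,
      cutlass::arch::OpMultiplyAdd>;

  Mma::FragmentA frag_A;   // 16 x e4m3 per thread
  Mma::FragmentB frag_B;   //  8 x e4m3 per thread
  Mma::FragmentC accum;    //  4 x f32  per thread
  accum.clear();

  Mma mma;
  mma(accum, frag_A, frag_B, accum);   // D = A * B + C, with C and D aliased to accum
}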
cutlass/include/cutlass/arch/mma_sm89.h/0
{ "file_path": "cutlass/include/cutlass/arch/mma_sm89.h", "repo_id": "cutlass", "token_count": 4621 }
20
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a proxy class for storing non-standard 16-bit floating point values with 8 bits of exponent and 7 bit of mantissa. */ #pragma once #if defined(__CUDACC_RTC__) #include "cutlass/floating_point_nvrtc.h" #else #include <cmath> #include <limits> #include <cstdint> #include <cstring> #endif #include <cuda_bf16.h> #include "cutlass/cutlass.h" #include "cutlass/platform/platform.h" namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// /// Floating-point type with 8 bits of exponent and 7 bits of mantissa. 
struct alignas(2) bfloat16_t { // // Data members // /// Storage type uint16_t storage; // // Methods // /// Constructs from an unsigned short CUTLASS_HOST_DEVICE static bfloat16_t bitcast(uint16_t x) { bfloat16_t h; h.storage = x; return h; } private: struct from_32_bit_integer_t {}; static constexpr from_32_bit_integer_t from_32_bit_integer{}; template<class T> CUTLASS_HOST_DEVICE explicit bfloat16_t(from_32_bit_integer_t, T x) { static_assert(cutlass::platform::is_integral<T>::value && sizeof(T) == 4, "Requires 32-bit integer"); float flt = static_cast<float>(x); uint32_t bits; #if defined(__CUDA_ARCH__) bits = reinterpret_cast<uint32_t &>(flt); #else std::memcpy(&bits, &flt, sizeof(bits)); #endif storage = uint16_t(bits >> 16); } public: /// Default constructor bfloat16_t() = default; /// Floating-point conversion - round toward nearest CUTLASS_HOST_DEVICE explicit bfloat16_t(float x) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDACC_VER_MAJOR__ >= 11) asm("cvt.rn.bf16.f32 %0, %1;\n" : "=h"(storage) : "f"(x)); #else uint32_t bits; #if defined(__CUDA_ARCH__) bits = reinterpret_cast<uint32_t &>(x); #else std::memcpy(&bits, &x, sizeof(bits)); #endif if ((bits & 0x7f800000) != 0x7f800000) { bool mantissa_bit = ((bits & (1 << 16)) != 0); bool round_bit = ((bits & (1 << 15)) != 0); bool sticky_bit = ((bits & ((1 << 15) - 1)) != 0); if ((round_bit && sticky_bit) || (round_bit && mantissa_bit)) { bits += uint32_t(1 << 16); } } else if (bits & ~0xff800000) { bits = 0x7fffffff; } storage = uint16_t((bits >> 16) & 0xffff); #endif } /// Floating-point conversion - round toward nearest CUTLASS_HOST_DEVICE explicit bfloat16_t(double x): bfloat16_t(float(x)) { } /// Integer conversion - round toward nearest CUTLASS_HOST_DEVICE explicit bfloat16_t(int x) : bfloat16_t(from_32_bit_integer, x) {} CUTLASS_HOST_DEVICE explicit bfloat16_t(uint32_t x) : bfloat16_t(from_32_bit_integer, x) {} /// Converts to float CUTLASS_HOST_DEVICE operator float() const { unsigned bits = (unsigned(storage) << 16); #if defined(__CUDA_ARCH__) return reinterpret_cast<float const &>(bits); #else float flt; std::memcpy(&flt, &bits, sizeof(flt)); return flt; #endif } /// Converts to float CUTLASS_HOST_DEVICE explicit operator double() const { return double(float(*this)); } /// Converts to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(float(*this)); } /// Casts to bool CUTLASS_HOST_DEVICE explicit operator bool() const { return (float(*this) != 0.0f); } /// Obtains raw bits CUTLASS_HOST_DEVICE uint16_t raw() const { return storage; } /// Returns the sign bit CUTLASS_HOST_DEVICE bool signbit() const { return ((raw() & 0x8000) != 0); } /// Returns the biased exponent CUTLASS_HOST_DEVICE int exponent_biased() const { return int((raw() >> 7) & 0x0ff); } /// Returns the unbiased exponent CUTLASS_HOST_DEVICE int exponent() const { return exponent_biased() - 127; } /// Returns the mantissa CUTLASS_HOST_DEVICE int mantissa() const { return int(raw() & 0x7f); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// CUTLASS_HOST_DEVICE bool signbit(cutlass::bfloat16_t const& h) { return h.signbit(); } CUTLASS_HOST_DEVICE cutlass::bfloat16_t abs(cutlass::bfloat16_t const& h) { return cutlass::bfloat16_t::bitcast(h.raw() & 0x7fff); } CUTLASS_HOST_DEVICE bool isnan(cutlass::bfloat16_t const& h) { return (h.exponent_biased() == 0x0ff) && h.mantissa(); } CUTLASS_HOST_DEVICE bool isfinite(cutlass::bfloat16_t const& h) { return (h.exponent_biased() != 0x0ff); } 
CUTLASS_HOST_DEVICE cutlass::bfloat16_t nan_bf16(const char*) { // NVIDIA canonical NaN return cutlass::bfloat16_t::bitcast(0x7fff); } CUTLASS_HOST_DEVICE bool isinf(cutlass::bfloat16_t const& h) { return (h.exponent_biased() == 0x0ff) && !h.mantissa(); } CUTLASS_HOST_DEVICE bool isnormal(cutlass::bfloat16_t const& h) { return h.exponent_biased() && h.exponent_biased() != 0x0ff; } CUTLASS_HOST_DEVICE int fpclassify(cutlass::bfloat16_t const& h) { int exp = h.exponent_biased(); int mantissa = h.mantissa(); if (exp == 0x0ff) { if (mantissa) { return FP_NAN; } else { return FP_INFINITE; } } else if (!exp) { if (mantissa) { return FP_SUBNORMAL; } else { return FP_ZERO; } } return FP_NORMAL; } CUTLASS_HOST_DEVICE cutlass::bfloat16_t sqrt(cutlass::bfloat16_t const& h) { #if defined(__CUDACC_RTC__) return cutlass::bfloat16_t(sqrtf(float(h))); #else return cutlass::bfloat16_t(std::sqrt(float(h))); #endif } CUTLASS_HOST_DEVICE bfloat16_t copysign(bfloat16_t const& a, bfloat16_t const& b) { uint16_t a_bits; uint16_t b_bits; #if defined(__CUDA_ARCH__) a_bits = reinterpret_cast<uint16_t const &>(a); b_bits = reinterpret_cast<uint16_t const &>(b); #else std::memcpy(&a_bits, &a, sizeof(a_bits)); std::memcpy(&b_bits, &b, sizeof(b_bits)); #endif uint16_t a_mag = (a_bits & 0x7fff); uint16_t b_sign = (b_bits & 0x8000); uint16_t result = (a_mag | b_sign); return bfloat16_t::bitcast(result); } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // // Standard Library operations and definitions // /////////////////////////////////////////////////////////////////////////////////////////////////// namespace std { #if !defined(__CUDACC_RTC__) /// Numeric limits template <> struct numeric_limits<cutlass::bfloat16_t> { static bool const is_specialized = true; static bool const is_signed = true; static bool const is_integer = false; static bool const is_exact = false; static bool const has_infinity = true; static bool const has_quiet_NaN = true; static bool const has_signaling_NaN = false; static std::float_denorm_style const has_denorm = std::denorm_present; static bool const has_denorm_loss = true; static std::float_round_style const round_style = std::round_to_nearest; static bool const is_iec559 = false; static bool const is_bounded = true; static bool const is_modulo = false; static int const digits = 7; /// Least positive value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t min() { return cutlass::bfloat16_t::bitcast(0x01); } /// Minimum finite value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t lowest() { return cutlass::bfloat16_t::bitcast(0xff7f); } /// Maximum finite value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t max() { return cutlass::bfloat16_t::bitcast(0x7f7f); } /// Returns smallest finite value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t epsilon() { return cutlass::bfloat16_t::bitcast(0x1000); } /// Returns smallest finite value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t round_error() { return cutlass::bfloat16_t(0.5f); } /// Returns smallest finite value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t infinity() { return cutlass::bfloat16_t::bitcast(0x7f80); } /// Returns smallest finite value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t quiet_NaN() { return cutlass::bfloat16_t::bitcast(0x7fff); } /// Returns smallest finite value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t signaling_NaN() { return 
cutlass::bfloat16_t::bitcast(0x7fff); } /// Returns smallest finite value CUTLASS_HOST_DEVICE static cutlass::bfloat16_t denorm_min() { return cutlass::bfloat16_t::bitcast(0x1); } }; #endif } // namespace std /////////////////////////////////////////////////////////////////////////////////////////////////// // // Arithmetic operators // /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// CUTLASS_HOST_DEVICE bool operator==(bfloat16_t const& lhs, bfloat16_t const& rhs) { return float(lhs) == float(rhs); } CUTLASS_HOST_DEVICE bool operator!=(bfloat16_t const& lhs, bfloat16_t const& rhs) { return float(lhs) != float(rhs); } CUTLASS_HOST_DEVICE bool operator<(bfloat16_t const& lhs, bfloat16_t const& rhs) { return float(lhs) < float(rhs); } CUTLASS_HOST_DEVICE bool operator<=(bfloat16_t const& lhs, bfloat16_t const& rhs) { return float(lhs) <= float(rhs); } CUTLASS_HOST_DEVICE bool operator>(bfloat16_t const& lhs, bfloat16_t const& rhs) { return float(lhs) > float(rhs); } CUTLASS_HOST_DEVICE bool operator>=(bfloat16_t const& lhs, bfloat16_t const& rhs) { return float(lhs) >= float(rhs); } CUTLASS_HOST_DEVICE bfloat16_t operator+(bfloat16_t const& lhs, bfloat16_t const& rhs) { return bfloat16_t(float(lhs) + float(rhs)); } CUTLASS_HOST_DEVICE bfloat16_t operator-(bfloat16_t const& lhs) { return bfloat16_t(-float(lhs)); } CUTLASS_HOST_DEVICE bfloat16_t operator-(bfloat16_t const& lhs, bfloat16_t const& rhs) { return bfloat16_t(float(lhs) - float(rhs)); } CUTLASS_HOST_DEVICE bfloat16_t operator*(bfloat16_t const& lhs, bfloat16_t const& rhs) { return bfloat16_t(float(lhs) * float(rhs)); } CUTLASS_HOST_DEVICE bfloat16_t operator/(bfloat16_t const& lhs, bfloat16_t const& rhs) { return bfloat16_t(float(lhs) / float(rhs)); } CUTLASS_HOST_DEVICE bfloat16_t& operator+=(bfloat16_t & lhs, bfloat16_t const& rhs) { lhs = bfloat16_t(float(lhs) + float(rhs)); return lhs; } CUTLASS_HOST_DEVICE bfloat16_t& operator-=(bfloat16_t & lhs, bfloat16_t const& rhs) { lhs = bfloat16_t(float(lhs) - float(rhs)); return lhs; } CUTLASS_HOST_DEVICE bfloat16_t& operator*=(bfloat16_t & lhs, bfloat16_t const& rhs) { lhs = bfloat16_t(float(lhs) * float(rhs)); return lhs; } CUTLASS_HOST_DEVICE bfloat16_t& operator/=(bfloat16_t & lhs, bfloat16_t const& rhs) { lhs = bfloat16_t(float(lhs) / float(rhs)); return lhs; } CUTLASS_HOST_DEVICE bfloat16_t& operator++(bfloat16_t & lhs) { float tmp(lhs); ++tmp; lhs = bfloat16_t(tmp); return lhs; } CUTLASS_HOST_DEVICE bfloat16_t& operator--(bfloat16_t & lhs) { float tmp(lhs); --tmp; lhs = bfloat16_t(tmp); return lhs; } CUTLASS_HOST_DEVICE bfloat16_t operator++(bfloat16_t & lhs, int) { bfloat16_t ret(lhs); float tmp(lhs); tmp++; lhs = bfloat16_t(tmp); return ret; } CUTLASS_HOST_DEVICE bfloat16_t operator--(bfloat16_t & lhs, int) { bfloat16_t ret(lhs); float tmp(lhs); tmp--; lhs = bfloat16_t(tmp); return ret; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // // User-defined literals // CUTLASS_HOST_DEVICE cutlass::bfloat16_t operator "" _bf16(long double x) { return cutlass::bfloat16_t(float(x)); } CUTLASS_HOST_DEVICE cutlass::bfloat16_t operator "" _bf16(unsigned long long int x) { return cutlass::bfloat16_t(int(x)); } 
/////////////////////////////////////////////////////////////////////////////////////////////////
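// Usage sketch for the type defined above: conversions, arithmetic operators, the _bf16
// literal, and the std::numeric_limits specialization. Assumes the CUTLASS include
// directory is on the compiler's include path.

#include <cstdio>
#include <limits>
#include "cutlass/bfloat16.h"

int main() {
  using cutlass::bfloat16_t;

  bfloat16_t a(1.5f);
  bfloat16_t b = 2.25_bf16;                      // user-defined literal declared above
  bfloat16_t c = a * b + bfloat16_t(0.5f);       // arithmetic round-trips through float

  std::printf("c = %f (raw bits 0x%04x)\n", float(c), c.raw());
  std::printf("bf16 max = %g\n", double(float(std::numeric_limits<bfloat16_t>::max())));
  return 0;
}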
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This file contains definitions and utility functions for describing convolution problem sizes in terms of activation (NHWC), filter (KRSC), output (NPQK), padding (pad_h, pad_w), stride (stride_h, stride_w), and dilation (dilation_h, dilation_w). Furthermore, it defines helper functions to map CUTLASS's implicit gemm tensor extents, sizes, and data types to that of the convolution's extents, sizes, and data types. * Mapping convolutions to Gemm computation * Cutlass implements convolutions with the Implicit Gemm algorithm. This algorithm performs a gemm (general matrix-matrix multiply) on the convolution tensors Activation, Filter, and Output. The underlying gemm operation follows the standard gemm definition: C = A * B + C A and B are input matrices C is source and output matrix For the three convolutional operators (Fprop, Dgrad, Wgrad), ImplicitGemm matrices A, B, and C are mapped to convolution tensors Activation, Filter and Output as described in the table below. ___________________________________________________________________________ ConvolutionalOperator | A | B | C ___________________________________________________________________________ | | | | | | Fprop | Activation | Filter | Output | | Dgrad | Output | Filter | Activation | | Wgrad | Output | Activation | Filter | ___________________________________________________________________________ In convolution codebase, DO NOT mix using (A, B, C) with (Activation, Filter, Output). For example, it's confusing and error prone to document a convolution class or function as operating on "A, B, Output." Instead, use the mapping functions below, and adhere to using either A, B, C or Activation, Filter, Output. 
Map elements' data types (ImplicitGemm -> Conv): GemmToConvElementMap Map elements' data types (Conv -> ImplicitGemm): ConvToGemmElementMap */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm_enumerated_types.h" #include "cutlass/matrix_coord.h" namespace cutlass { namespace conv { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Convolutional operator enum class Operator { kFprop, kDgrad, kWgrad, kDeconv }; /// Distinguishes convolution from cross correlation enum class Mode { kCrossCorrelation, kConvolution }; /// Selects among several implementation variants trading off performance with simplicity enum class IteratorAlgorithm { kAnalytic, ///< functionally correct in all cases but lower performance kOptimized, ///< optimized for R <= 32, S <= 32 and unity-stride dgrad kFixedChannels, ///< Analytic algorithm optimized for fixed channel count (C == AccessSize) kFewChannels, ///< Analytic algorithm optimized for few channels (C divisible by AccessSize) kFixedStrideDilation ///< Optimized for fixed stride and dilation }; /// Distinguishes among partial specializations that accelerate certain problems where convolution /// stride is unit. enum class StrideSupport { kStrided, ///< arbitrary convolution stride kUnity, ///< unit convolution stride kFixed ///< fixed convolution stride }; /// Identifies split-K mode enum class SplitKMode { kNone, kSerial, kParallel }; /// Identifies group mode enum class GroupMode { kNone, kSingleGroup, ///< One CTA calculates one group or less kMultipleGroup, ///< One CTA calculates multiple groups kDepthwise ///< One CTA calculates cta_n groups (problem_size.C == problem_size.K == problem_size.groups) }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Shape of a tensor template < int N = 1, int H = 1, int W = 1, int C = 1 > struct TensorNHWCShape { static int const kN = N; static int const kH = H; static int const kW = W; static int const kC = C; static int const kHW = H * W; static int const kNHW = N * kHW; static int const kNHWC = N * H * W * C; static int const kCount = kNHWC; // // Static member functions // /// Returns a Coord object CUTLASS_HOST_DEVICE static Coord<4> toCoord() { return make_Coord(kN, kH, kW, kC); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Shape of a conv2d stride, which controls how the filter convolves around the input volume template < /// Stride in horizontal direction int u = 1, /// Stride in vertical direction int v = 1 > struct Stride2D { static int const kU = u; static int const kV = v; // // Static member functions // /// Returns a Coord object CUTLASS_HOST_DEVICE static Coord<2> toCoord() { return make_Coord(kU, kV); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace conv } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
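// Usage sketch for the compile-time helpers above (TensorNHWCShape, Stride2D). The extents
// are arbitrary illustration values; assumes the CUTLASS include directory is on the
// compiler's include path.

#include <cstdio>
#include "cutlass/conv/convolution.h"

int main() {
  using Activation = cutlass::conv::TensorNHWCShape<1, 224, 224, 64>;  // N, H, W, C
  using Stride     = cutlass::conv::Stride2D<2, 2>;                    // u, v

  static_assert(Activation::kNHWC == 1 * 224 * 224 * 64, "kNHWC is the product of the extents");

  cutlass::Coord<4> extent = Activation::toCoord();
  cutlass::Coord<2> stride = Stride::toCoord();

  std::printf("activation extent = (%d, %d, %d, %d)\n", extent[0], extent[1], extent[2], extent[3]);
  std::printf("filter stride     = (%d, %d)\n", stride[0], stride[1]);
  return 0;
}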
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template wraps the tile access iterator concept to load whole tiles from tensors in memory used for implicit GEMM convolution. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename TileAccessIterator_> class TileIterator { public: using TileAccessIterator = TileAccessIterator_; using Shape = typename TileAccessIterator::Shape; using Element = typename TileAccessIterator::Element; using Layout = typename TileAccessIterator::Layout; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = typename TileAccessIterator::ThreadMap; using AccessType = typename TileAccessIterator::AccessType; using TensorRef = typename TileAccessIterator::TensorRef; using Index = typename TileAccessIterator::Index; using LongIndex = typename TileAccessIterator::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm; static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport; using Params = typename TileAccessIterator::Params; static int const kConvDim = TileAccessIterator::kConvDim; using ConvProblemSize = typename TileAccessIterator::ConvProblemSize; static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; private: /// Internal state TileAccessIterator tile_access_iterator_; public: /// Constructor CUTLASS_HOST_DEVICE TileIterator( Params const &params, ConvProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): tile_access_iterator_(params, problem_size, ptr, thread_idx, threadblock_offset) { } CUTLASS_HOST_DEVICE static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) { return TileAccessIterator::getParams(problem_size, layout); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { tile_access_iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { tile_access_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE TileIterator &operator++() { tile_access_iterator_.advance(); return *this; } /// Advances to the next tile in memory. 
CUTLASS_HOST_DEVICE TileIterator operator++(int) { TileIterator self(*this); operator++(); return self; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { frag.clear(); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[idx], tile_access_iterator_.get() + pointer_offset, tile_access_iterator_.valid() ); ++tile_access_iterator_; } } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { tile_access_iterator_.set_iteration_index(0); load_with_pointer_offset(frag, 0); } CUTLASS_DEVICE void advance() { tile_access_iterator_.advance(); } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(ConvProblemSize const &problem_size) { // dispatch to iterator implementation return TileAccessIterator::can_implement(problem_size); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Strided Dgrad Tile Iterator template <typename TileAccessIterator_> class TileIteratorStridedDgrad { public: using TileAccessIterator = TileAccessIterator_; using Shape = typename TileAccessIterator::Shape; using Element = typename TileAccessIterator::Element; using Layout = typename TileAccessIterator::Layout; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = typename TileAccessIterator::ThreadMap; using AccessType = typename TileAccessIterator::AccessType; using TensorRef = typename TileAccessIterator::TensorRef; using Index = typename TileAccessIterator::Index; using LongIndex = typename TileAccessIterator::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm; static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport; using Params = typename TileAccessIterator::Params; static int const kConvDim = TileAccessIterator::kConvDim; using ConvProblemSize = typename TileAccessIterator::ConvProblemSize; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; private: /// Internal state TileAccessIterator tile_access_iterator_; public: /// Constructor (output gradient (Dy) OperandA ctor) CUTLASS_HOST_DEVICE TileIteratorStridedDgrad( Params const &params, ConvProblemSize const &problem_size, Element const *ptr, int thread_idx, FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod, int start_r, int start_s, MatrixCoord const &threadblock_offset = MatrixCoord() ): tile_access_iterator_( params, problem_size, ptr, thread_idx, stride_h_divmod, stride_w_divmod, start_r, start_s, threadblock_offset) { } /// Constructor (filter (w) OperandB ctor) CUTLASS_HOST_DEVICE TileIteratorStridedDgrad( Params const &params, ConvProblemSize const &problem_size, Element const *ptr, int thread_idx, int start_r, int start_s, MatrixCoord const &threadblock_offset = MatrixCoord() ): tile_access_iterator_(params, problem_size, ptr, thread_idx, start_r, start_s, threadblock_offset) { } CUTLASS_HOST_DEVICE static Params getParams(ConvProblemSize const &problem_size, 
Layout const &layout) { return TileAccessIterator::getParams(problem_size, layout); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { tile_access_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE TileIteratorStridedDgrad &operator++() { tile_access_iterator_.advance(); return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE TileIteratorStridedDgrad operator++(int) { TileIteratorStridedDgrad self(*this); operator++(); return self; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { frag.clear(); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[c + s * ThreadMap::Iterations::kContiguous], tile_access_iterator_.get() + pointer_offset, tile_access_iterator_.valid() ); ++tile_access_iterator_; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { tile_access_iterator_.set_iteration_index(0); load_with_pointer_offset(frag, 0); } CUTLASS_DEVICE void advance() { tile_access_iterator_.advance(); } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(ConvProblemSize const &problem_size) { // dispatch to iterator implementation return TileAccessIterator::can_implement(problem_size); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
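// Illustration: a simplified standalone analogue (plain C++, not the CUTLASS types) of the
// load_with_pointer_offset() loop above, showing how the fragment index is composed from the
// per-vector, contiguous, and strided loop indices while a single access iterator is stepped
// once per access. The toy iterator and shapes are invented for illustration; the real
// iterator dereferences global memory through cutlass::arch::global_load with predication.

#include <array>
#include <cstdio>

constexpr int kStrided = 2, kContiguous = 2, kAccessesPerVector = 2;

struct ToyAccessIterator {
  int index = 0;
  bool valid() const { return true; }         // the real iterator predicates out-of-bounds accesses
  int  value() const { return index * 10; }   // stand-in for dereferencing a global-memory pointer
  ToyAccessIterator &operator++() { ++index; return *this; }
};

int main() {
  std::array<int, kStrided * kContiguous * kAccessesPerVector> frag{};
  ToyAccessIterator it;

  for (int s = 0; s < kStrided; ++s) {
    for (int c = 0; c < kContiguous; ++c) {
      for (int v = 0; v < kAccessesPerVector; ++v) {
        int idx = v + kAccessesPerVector * (c + s * kContiguous);
        if (it.valid()) {
          frag[idx] = it.value();              // global_load<AccessType, ...>(...) in the real code
        }
        ++it;                                  // one increment per access, matching the real loop
      }
    }
  }

  for (int x : frag) std::printf("%d ", x);
  std::printf("\n");
  return 0;
}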
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile) matrix from memory. This iterator assumes TensorNDHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv3d_problem_size.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_ > class Conv3dWgradOutputGradientTileAccessIteratorAnalytic { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNDHWC; using ThreadMap = ThreadMap_; using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 3; using ConvProblemSize = typename conv::Conv3dProblemSize; static int const kAccessesPerVector = 1; static_assert(sizeof_bits<Element>::value >= 8, "WGRAD requires elements of size 8b or greater."); // // Parameters structure // struct Params { Layout layout; // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params( Conv3dProblemSize const &problem_size, Layout const &layout ): layout(layout) { } }; private: Params const &params_; Conv3dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; char const *pointer_; int filter_k_[ThreadMap::Iterations::kContiguous]; int offset_nzpq_[ThreadMap::Iterations::kStrided]; public: CUTLASS_HOST_DEVICE Conv3dWgradOutputGradientTileAccessIteratorAnalytic( Params const &params, Conv3dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); // initialize filter_k for every contiguous iteration CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { filter_k_[c] = threadblock_offset.row() + thread_coord.contiguous() + c * ThreadMap::Delta::kContiguous; } // initialize n, p, q offset for every strided iteration CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { offset_nzpq_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; } } CUTLASS_HOST_DEVICE static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) { return Params(problem_size, layout); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void 
advance() { // moves to the next GEMM-K offset (offset_nzpq_) in GEMM-A by a CTA-K tile CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { offset_nzpq_[s] += Shape::kColumn * problem_size_.split_k_slices; } } /// Returns the coordinate in the output gradient tensor Dy that is currently pointed to /// by the iterator. CUTLASS_HOST_DEVICE TensorCoord at() const { int nzpq = offset_nzpq_[iteration_strided_]; int n = nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q); int residual = nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q); int z = residual / (problem_size_.P * problem_size_.Q); residual = residual % (problem_size_.P * problem_size_.Q); int p = residual / problem_size_.Q; int q = residual % problem_size_.Q; return TensorCoord(n, z, p, q, filter_k_[iteration_contiguous_]); } /// Returns true if the current coordinate is within the output gradient tensor Dy CUTLASS_HOST_DEVICE bool valid() const { TensorCoord coord = at(); return coord.n() < problem_size_.N && coord.d() < problem_size_.Z && coord.h() < problem_size_.P && coord.w() < problem_size_.Q && coord.c() < problem_size_.K; } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { TensorCoord coord = at(); LongIndex offset = params_.layout(coord); return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv3dWgradOutputGradientTileAccessIteratorAnalytic &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(Conv3dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.K % (128/sizeof_bits<Element>::value)) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
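// Illustration: a standalone host-only sketch of the index arithmetic in at() above, which
// maps a linear GEMM-K offset over (N, Z, P, Q) back to output-gradient tensor coordinates
// with a chain of divisions and remainders. The extents are arbitrary example values.

#include <cstdio>

struct Coord4 { int n, z, p, q; };

Coord4 decompose_nzpq(int nzpq, int Z, int P, int Q) {
  int n        = nzpq / (Z * P * Q);
  int residual = nzpq % (Z * P * Q);
  int z        = residual / (P * Q);
  residual     = residual % (P * Q);
  int p        = residual / Q;
  int q        = residual % Q;
  return {n, z, p, q};
}

int main() {
  // With Z=4, P=8, Q=8: offset 300 = ((1*4 + 0)*8 + 5)*8 + 4
  Coord4 c = decompose_nzpq(300, 4, 8, 8);
  std::printf("n=%d z=%d p=%d q=%d\n", c.n, c.z, c.p, c.q);  // prints n=1 z=0 p=5 q=4
  return 0;
}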
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Implements several possible threadblock-swizzling functions mapping blockIdx to Convolution problems. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/platform/platform.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// CUTLASS_HOST_DEVICE static int get_strided_dgrad_tile_m( cutlass::conv::Conv2dProblemSize const &problem_size, int tile_size_m) { // CTAs in M dimension per starting filter position int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, tile_size_m); // Inflate number of CTAs in M dimension to cover every strating filter position even those that // may fall out of valid MMA (Dy * w) but are needed to apply epilogue (beta * Dx_source) // and point-wise fusion int tile_m = tile_m_per_filter * int(problem_size.stride().product()); // There is a possible performance optimization here that leads up to 2x speeds than the current // CUTLASS strided dgrad performance for stride > filter, i.e., stride={2x2} and filter={1x1}) // // * Optimization * // Only launch CTAs in M dimension which contribute to a row in Dx output // // // * Constraints * // (A) stride <= filter, for example, stride={2x2} and filter={3x3}: // - (A.1): There are no constraints for this case and the optimization does // affect this case functionality or performance. 
// (B) stride > filter, for example, stride={2x2} and filter={1x1}: // - (B.1): Dx output tensor should be zero initialized // - (B.2): The kernel epilogue cannot apply beta. Thus, beta should be zero return tile_m; } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// Threadblock swizzling function for strided dgrad convolution struct StridedDgradHorizontalThreadblockSwizzle : public gemm::threadblock::GemmHorizontalThreadblockSwizzle { using Base = gemm::threadblock::GemmHorizontalThreadblockSwizzle; CUTLASS_HOST_DEVICE StridedDgradHorizontalThreadblockSwizzle() { } /// Returns the shape of the problem in units of logical tiles /// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC) CUTLASS_HOST_DEVICE static gemm::GemmCoord get_tiled_shape( cutlass::conv::Operator conv_operator, cutlass::conv::Conv2dProblemSize const &problem_size, gemm::GemmCoord tile_size, int split_k_slices) { gemm::GemmCoord implicit_gemm_problem_size = cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size); // compute number of tiles in m dimension int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m()); // compute number of tiles in n dimension int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n(); return gemm::GemmCoord( tile_m, tile_n, split_k_slices); } /// Returns the shape of the problem in units of logical tiles /// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape()) private: using Base::get_tiled_shape; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Threadblock swizzling function for strided dgrad convolution template <int N = 1> struct StridedDgradIdentityThreadblockSwizzle : public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> { using Base = gemm::threadblock::GemmIdentityThreadblockSwizzle<N>; CUTLASS_HOST_DEVICE StridedDgradIdentityThreadblockSwizzle() { } /// Returns the shape of the problem in units of logical tiles /// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC) CUTLASS_HOST_DEVICE static gemm::GemmCoord get_tiled_shape( cutlass::conv::Operator conv_operator, cutlass::conv::Conv2dProblemSize const &problem_size, gemm::GemmCoord tile_size, int split_k_slices) { gemm::GemmCoord implicit_gemm_problem_size = cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size); // compute number of tiles in m dimension int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m()); // compute number of tiles in n dimension int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n(); return gemm::GemmCoord( tile_m, tile_n, split_k_slices); } /// Returns the shape of the problem in units of logical tiles /// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape()) private: using Base::get_tiled_shape; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Threadblock swizzling function for GEMMs template <int N = 1, int Output_N = 1, int Output_P = 1, int Output_Q = 1> struct DepthwiseDirect2dConvIdentityThreadblockSwizzle : public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> { CUTLASS_HOST_DEVICE DepthwiseDirect2dConvIdentityThreadblockSwizzle() {} /// Returns the shape of the problem in units of logical tiles CUTLASS_HOST_DEVICE static gemm::GemmCoord 
get_tiled_shape(cutlass::conv::Operator conv_operator, cutlass::conv::Conv2dProblemSize const &problem_size, gemm::GemmCoord tile_size, int split_k_slices) { gemm::GemmCoord implicit_gemm_problem_size = cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size); return gemm::GemmCoord(1, (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n(), split_k_slices); } }; } // namespace threadblock } // namespace conv } // namespace cutlass
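// Illustration: the grid-shape arithmetic above in isolation (plain host C++). For strided
// dgrad, the number of CTAs in M is inflated by the stride product so that every starting
// filter position is covered; N uses the usual ceiling division over the implicit GEMM extent.
// All concrete numbers below are invented for illustration.

#include <cstdio>

int ceil_div(int a, int b) { return (a + b - 1) / b; }

int main() {
  int tile_m_per_filter = 6;                    // CTAs in M per starting filter position (example)
  int stride_h = 2, stride_w = 2;               // convolution stride
  int gemm_n = 512, tile_n_size = 128;          // implicit GEMM N extent and CTA tile N
  int split_k_slices = 1;

  int tile_m = tile_m_per_filter * (stride_h * stride_w);
  int tile_n = ceil_div(gemm_n, tile_n_size);

  std::printf("tiled shape = (%d, %d, %d)\n", tile_m, tile_n, split_k_slices);  // (24, 4, 1)
  return 0;
}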
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This extends the contents of cutlass/functional.h with frequently used activation functions. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/constants.h" #include "cutlass/complex.h" #include "cutlass/array.h" #include "cutlass/half.h" #include "cutlass/functional.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// // Identity operator template <typename T> struct Identity { static const bool kIsHeavy = false; CUTLASS_HOST_DEVICE T operator()(T value) const { return value; } }; template <typename T, int N> struct Identity<Array<T, N> > { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> value) const { return value; } }; /// Scale operator template <typename T> struct Scale { struct Arguments { using scale_type = T; T scale = T(1); }; CUTLASS_HOST_DEVICE T operator()(T value, T scale) const { multiplies<T> mul; return mul(scale, value); } CUTLASS_HOST_DEVICE T operator()(T value, Arguments args = Arguments()) const { return this->operator()(value, args.scale); } }; template <typename T, int N> struct Scale<Array<T, N>> { using Arguments = typename Scale<T>::Arguments; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> values, T scale) const { multiplies<Array<T, N>> mul; return mul(scale, values); } CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> values, Arguments args = Arguments()) const { return this->operator()(values, args.scale); } }; /// Specialization to compose other activations with a defined unary operator /// e.g. Scale<Identity<T>> template <template <class> class Activation, typename T> struct Scale<Activation<T>> { using Arguments = typename Scale<T>::Arguments; CUTLASS_HOST_DEVICE T operator()(T value, typename Arguments::scale_type scale) const { multiplies<T> mul; Activation<T> act; return mul(scale, act(value)); } CUTLASS_HOST_DEVICE T operator()(T value, Arguments args = Arguments()) const { return this->operator()(value, args.scale); } }; /// ReLu operator - propagates NaNs /// Always put threshold in the right hand side of max to propagate NaN. 
template <typename T> struct ReLu { static const bool kIsHeavy = false; CUTLASS_HOST_DEVICE T operator()(T threshold, T value) const { maximum<T> mx; return mx(value, threshold); } CUTLASS_HOST_DEVICE T operator()(T value) const { maximum<T> mx; return mx(value, T(0)); } }; template <typename T> using ReLU = ReLu<T>; template <typename T, int N> struct ReLu<Array<T, N>> { static const bool kIsHeavy = false; CUTLASS_HOST_DEVICE Array<T, N> operator()(T const & threshold, Array<T, N> const &frag) const { maximum<Array<T, N>> mx; return mx(frag, threshold); } CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &frag) const { maximum<Array<T, N>> mx; return mx(frag, T(0)); } }; // Generic clamp template <typename T> struct Clamp { struct Arguments { T lower_bound = CUTLASS_STL_NAMESPACE::numeric_limits<T>::lowest(); T upper_bound = CUTLASS_STL_NAMESPACE::numeric_limits<T>::max(); }; CUTLASS_HOST_DEVICE T operator()(T const& value, T const& lower_bound, T const& upper_bound) const { maximum<T> mx; minimum<T> mn; return mn(mx(value, lower_bound), upper_bound); } CUTLASS_HOST_DEVICE T operator()(T const& value, Arguments const& args = Arguments()) const { return this->operator()(value, args.lower_bound, args.upper_bound); } }; template <typename T, int N> struct Clamp<Array<T,N>> { using Arguments = typename Clamp<T>::Arguments; CUTLASS_HOST_DEVICE Array<T,N> operator()(Array<T,N> const& values, T const& lower_bound, T const& upper_bound) const { maximum<Array<T,N>> mx; minimum<Array<T,N>> mn; return mn(mx(values, lower_bound), upper_bound); } CUTLASS_HOST_DEVICE Array<T,N> operator()(Array<T,N> const& values, Arguments const& args = Arguments()) const { return this->operator()(values, args.lower_bound, args.upper_bound); } }; // Leaky Relu operator template <typename T> struct LeakyReLU { static const bool kIsHeavy = false; struct Arguments { T leaky_alpha = T(0); }; CUTLASS_HOST_DEVICE T operator()(T const& value, T const& leaky_alpha) const { T res = value > T(0) ? 
value : value * leaky_alpha; return res; } CUTLASS_HOST_DEVICE T operator()(T const& value, Arguments const& args = Arguments()) const { return this->operator()(value, args.leaky_alpha); } }; template <typename T, int N> struct LeakyReLU<Array<T, N> > { static const bool kIsHeavy = false; using Arguments = typename LeakyReLU<T>::Arguments; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& values, T const& leaky_alpha) const { Array<T, N> y; LeakyReLU<T> leaky_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < int(values.size()); ++i) { y[i] = leaky_op(values[i], leaky_alpha); } return y; } CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& values, Arguments const& args = Arguments()) const { return this->operator()(values, args.leaky_alpha); } }; // Tanh operator template <typename T> struct Tanh { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE T operator()(T const &value) const { return fast_tanh(value); } }; template <typename T, int N> struct Tanh<Array<T, N> > { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; Tanh<T> tanh_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = tanh_op(value[i]); } return y; } }; template <int N> struct Tanh<Array<half_t, N>> { using T = half_t; static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& z) const { fast_tanh_op<Array<T, N>> tanh; return tanh(z); } }; // Sigmoid operator template <typename T> struct Sigmoid { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE T operator()(T const &value) const { return T(1) / (T(1) + fast_exp(-value)); } }; template <typename T, int N> struct Sigmoid<Array<T, N> > { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; Sigmoid<T> sigmoid_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = sigmoid_op(value[i]); } return y; } }; template <int N> struct Sigmoid<Array<half_t, N>> { using T = half_t; static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& z) const { plus<Array<T, N>> add; #if defined(CUTLASS_USE_TANH_FOR_SIGMOID) multiplies<Array<T, N>> mul; fast_tanh_op<Array<T, N>> tanh; return mul(add(tanh(mul(z, cutlass::constants::half<T>())), cutlass::constants::one<T>()), cutlass::constants::half<T>()); #else divides<Array<T, N>> div; negate<Array<T, N>> neg; fast_exp_op<Array<T, N>> fast_exp; return div(cutlass::constants::one<T>(), add(cutlass::constants::one<T>(), fast_exp(neg(z)))); #endif } }; // SiLu (swish) operator introduced by Elfwing et al. in the following paper // "Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning" (2017) // https://arxiv.org/pdf/1702.03118.pdf // It is used in EfficientNet and YOLOv5, for example. // Reference: https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html template <typename T> struct SiLu { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE T operator()(T const &value) const { Sigmoid<T> sigmoid; return value * sigmoid(value); } }; template <typename T, int N> struct SiLu<Array<T, N>> { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Sigmoid<Array<T, N>> sigmoid_op; multiplies<Array<T, N>> mul; return mul(value, sigmoid_op(value)); } }; // Hardswish operator introduced by Howard et al. 
in the following paper // "Searching for MobileNetV3" (2019) // https://arxiv.org/pdf/1905.02244.pdf // It is used in models based on MobilenetNetV3. // Reference: https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html template <typename T> struct HardSwish { static const bool kIsHeavy = false; CUTLASS_HOST_DEVICE T operator()(T const &x) const { minimum<T> mn; maximum<T> mx; T relu6 = mn(mx(x + T(3), T(0)), T(6)); return x * relu6 / T(6); } }; template <> struct HardSwish<float> { using T = float; static const bool kIsHeavy = false; CUTLASS_HOST_DEVICE T operator()(T const &x) const { minimum<T> mn; maximum<T> mx; T relu6 = mn(mx(x + T(3), T(0)), T(6)); return x * relu6 * 0.16666667f; } }; template <typename T, int N> struct HardSwish<Array<T, N> > { static const bool kIsHeavy = false; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; HardSwish<T> hardswish_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = hardswish_op(value[i]); } return y; } }; template <int N> struct HardSwish<Array<half_t, N> > { using T = half_t; static const bool kIsHeavy = false; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { minimum<Array<T, N> > mn; maximum<Array<T, N> > mx; multiplies<Array<T, N> > mul; plus<Array<T, N> > add; return mul(mul(mn(mx(add(value, T(3)), T(0)), T(6)), value), T(0.16666667f)); } }; // // GELU function definitions implemented as described by // Hendrycks, D., and Gimpel, K. in // "Gaussian Error Linear Units (GELUs)." (2020) // https://arxiv.org/pdf/1606.08415.pdf // // Floating-point constants are Taylor coefficients described in the paper. // // GELU operator template <typename T> struct GELU { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE T operator()(T const &value) const { return T(cutlass::constants::half<T>() * value * (cutlass::constants::one<T>() + (T)erff((float)(value * cutlass::constants::half_root_two<T>())))); } }; template <> struct GELU<float> { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE float operator()(float const &value) const { return cutlass::constants::half<float>() * value * (cutlass::constants::one<float>() + erff(value * cutlass::constants::half_root_two<float>() )); } }; template <> struct GELU<double> { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE double operator()(double const &value) const { return cutlass::constants::half<double>() * value * (cutlass::constants::one<double>() + erf( value * cutlass::constants::half_root_two<double>() )); } }; template <typename T, int N> struct GELU<Array<T, N> > { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; GELU<T> gelu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = gelu_op(value[i]); } return y; } }; template <typename T> using ScaledGELU = Scale<GELU<T>>; // GELU operator implemented using the Taylor series approximation template <typename T> struct GELU_taylor { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE T operator()(T const &z) const { T k0 = T(0.7978845608028654); T k1 = T(0.044715); return T(cutlass::constants::half<T>() * z * (cutlass::constants::one<T>() + fast_tanh(k0 * z * (cutlass::constants::one<T>() + k1 * z * z)))); } }; template <int N> struct GELU_taylor<Array<half_t, N> > { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<half_t, N> operator()(Array<half_t, N> const &z) const { using T = half_t; Array<half_t, N> y; half_t k0 = half_t(0.7978845608028654); half_t 
k1 = half_t(0.044715); multiply_add<Array<half_t, N>> fma; multiplies<Array<half_t, N>> mul; plus<Array<half_t, N>> add; fast_tanh_op<Array<half_t, N>> tanh; Array<half_t, N> u = mul(mul(k0, z), fma(mul(k1, z), z, cutlass::constants::one<T>())); y = mul(mul(z, cutlass::constants::half<T>()), add(cutlass::constants::one<T>(), tanh(u))); return y; } }; template <typename T, int N> struct GELU_taylor<Array<T, N> > { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; GELU_taylor<T> gelu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = gelu_op(value[i]); } return y; } }; template <typename T> using ScaledGELU_taylor = Scale<GELU_taylor<T>>; /// Computes backwards pass for GELU operator assuming d_t is the layer gradient and /// z is computed from the forward pass. template <typename T> struct dGELU { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE T operator()(T const &d_t, T const &z) const { T k0 = T(0.7978845608028654); T k1 = T(0.044715); T k2 = T(0.1070322243); T tanh_out = fast_tanh(k0 * z * (1 + k1 * z * z)); T ff = constants::half<T>() * z * ((1 - tanh_out * tanh_out) * (k0 + k2 * z * z)) + constants::half<T>() * (1 + tanh_out); return ff * d_t; } }; template <typename T, int N> struct dGELU<Array<T, N> > { static const bool kIsHeavy = true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &d_t, Array<T, N> const &z) const { Array<T, N> y; dGELU<T> gelu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = gelu_op(d_t[i], z[i]); } return y; } }; template <typename T> struct dReLU { CUTLASS_HOST_DEVICE T operator()(T d_t, bool d_relu) const { return d_relu ? d_t : T(0); } template <typename U> CUTLASS_HOST_DEVICE T operator()(T d_t, U d_relu) const { return operator()(d_t, static_cast<bool>(d_relu)); } }; template <typename T, int N> struct dReLU<Array<T, N>> { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& d_t, bool const (&d_relu)[N]) const { Array<T, N> y; dReLU<T> relu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = relu_op(d_t[i], d_relu[i]); } return y; } CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& d_t, Array<uint1b_t, N> const& d_relu) const { UnpackPredicates<N> unpack_op; bool preds[N]; unpack_op(preds, d_relu); return operator()(d_t, preds); } template <typename U> CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& d_t, Array<U, N> const& d_relu) const { Array<T, N> y; dReLU<T> relu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = relu_op(d_t[i], d_relu[i]); } return y; } }; /// Computes backwards pass for ReLU operator assuming d_t is the layer gradient and /// z is computed from the forward pass. template <typename T> struct dReLU_Z { CUTLASS_HOST_DEVICE T operator()(T d_t, T z) const { return z < 0 ? T(0) : d_t; } }; template <typename T, int N> struct dReLU_Z<Array<T, N>> { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& d_t, Array<T, N> const& z) const { Array<T, N> y; dReLU_Z<T> relu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = relu_op(d_t[i], z[i]); } return y; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
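// Illustration: a standalone numerical comparison of the erf-based GELU and the tanh (Taylor)
// approximation implemented by GELU_taylor above, using the same constants k0 = sqrt(2/pi) and
// k1 = 0.044715. Host-only; the sample points are arbitrary.

#include <cmath>
#include <cstdio>

double gelu_erf(double x) {
  const double inv_sqrt2 = 0.7071067811865476;
  return 0.5 * x * (1.0 + std::erf(x * inv_sqrt2));
}

double gelu_tanh(double x) {
  const double k0 = 0.7978845608028654;   // sqrt(2/pi)
  const double k1 = 0.044715;
  return 0.5 * x * (1.0 + std::tanh(k0 * x * (1.0 + k1 * x * x)));
}

int main() {
  for (double x : {-2.0, -0.5, 0.0, 0.5, 2.0}) {
    std::printf("x=% .2f  erf GELU=% .6f  tanh GELU=% .6f\n", x, gelu_erf(x), gelu_tanh(x));
  }
  return 0;
}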
{ "file_path": "cutlass/include/cutlass/epilogue/thread/activation.h", "repo_id": "cutlass", "token_count": 7230 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with a maximum operation used by epilogues. */ #pragma once #include "cutlass/half.h" #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/scale_type.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { /// Single source of truth for whether to unroll for `LinearCombinationClamp()` constexpr bool LinearCombinationReluIsHeavy() { return false; } } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationRelu { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentScaleBias = Array<ElementCompute, kCount>; using FragmentSource = Array<ElementOutput, kCount>; static FloatRoundStyle const kRound = Round; static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy(); /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute threshold; ///< minimum value that is output ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), threshold(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta = ElementCompute(0), ElementCompute threshold = ElementCompute(0) ): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr = nullptr, ElementCompute threshold = ElementCompute(0) ): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementCompute threshold_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationRelu(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); threshold_ = params.threshold; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // set to NaN to make ReLU no-op for all except last k partitions int64_t allones = -1; threshold_ = reinterpret_cast<ElementCompute const &>(allones); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Compute threshold optionally intermediate = relu(threshold_, intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Compute threshold optionally intermediate = relu(threshold_, intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes per-channel linear scaling and bias : D = scale * accumulator + bias /// Scale and Bias are from input Fragment CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentScaleBias const &scale, FragmentScaleBias const &bias) const { // Convert source to interal compute numeric type 
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform per-channel scale and bias FragmentCompute intermediate; multiply_add<FragmentCompute> mul_add_accumulator; if(Scale == ScaleType::OnlyAlphaPerChannelScaling) intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias else intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias ReLu<FragmentCompute> relu; // Compute threshold optionally intermediate = relu(threshold_, intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Conditional guards to enable partial specialization for packed integers #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2))) /// Applies a linear combination operator to an array of elements. /// /// D = alpha * accumulator + beta * source + uniform /// /// Special handling for int types template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ScaleType::Kind Scale, ///< Control Alpha and Beta scaling FloatRoundStyle Round > class LinearCombinationRelu <ElementOutput_, Count, int, float, Scale, Round> { public: using ElementOutput = ElementOutput_; using ElementAccumulator = int; using ElementCompute = float; static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy(); static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentScaleBias = Array<ElementCompute, kCount>; using FragmentSource = Array<ElementOutput, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute threshold; ///< minimum value that is output ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), threshold(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta = ElementCompute(0), ElementCompute threshold = ElementCompute(0) ): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr = nullptr, ElementCompute threshold = ElementCompute(0) ): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementCompute threshold_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationRelu(Params const &params) { alpha_ = (params.alpha_ptr ? 
*params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta); threshold_ = params.threshold; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // set to NaN to make ReLU no-op for all except last k partitions int64_t allones = -1; threshold_ = reinterpret_cast<ElementCompute const &>(allones); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Compute threshold optionally intermediate = relu(threshold_, intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Compute threshold optionally intermediate = relu(threshold_, intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats 
back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } /// Computes per-channel linear scaling and bias : D = scale * accumulator + bias /// Scale and Bias are from input Fragment CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentScaleBias const &scale, FragmentScaleBias const &bias) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform per-channel scale and bias FragmentCompute intermediate; multiply_add<FragmentCompute> mul_add_accumulator; if(Scale == ScaleType::OnlyAlphaPerChannelScaling) intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias else intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias ReLu<FragmentCompute> relu; // Compute threshold optionally intermediate = relu(threshold_, intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } }; #endif // Conditional guards to enable partial specialization for packed integers ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
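// Illustrative usage sketch (not part of the original header): applies the
// ReLU-fused linear-combination functor to a small fragment on the host.
// Assumes the CUTLASS include directory is on the include path; the fragment
// width of 4 and the alpha/beta values are arbitrary.
#include <iostream>
#include "cutlass/array.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"

int main() {
  // ElementOutput = float with 4 elements per operation; the accumulator and
  // compute types default to the output type.
  using Op = cutlass::epilogue::thread::LinearCombinationRelu<float, 4>;

  Op::Params params(2.0f /*alpha*/, 1.0f /*beta*/, 0.0f /*threshold*/);
  Op op(params);

  Op::FragmentAccumulator accum;
  Op::FragmentOutput source;
  for (int i = 0; i < 4; ++i) {
    accum[i]  = float(i) - 1.5f;   // {-1.5, -0.5, 0.5, 1.5}
    source[i] = 0.25f;
  }

  // D = max(alpha * accumulator + beta * source, threshold)
  Op::FragmentOutput d = op(accum, source);
  for (int i = 0; i < 4; ++i) {
    std::cout << "d[" << i << "] = " << d[i] << "\n";   // 0, 0, 1.25, 3.25
  }
  return 0;
}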
{ "file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_relu.h", "repo_id": "cutlass", "token_count": 6857 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops on Volta. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/layout/permute.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueVoltaTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, ElementAccumulator >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8; static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B"); using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator, kSharedMemAlignment >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, 
AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueVoltaTensorOpStridedDgrad { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, ElementAccumulator >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad< OutputTileThreadMap, ElementOutput >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8; static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B"); using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator, kSharedMemAlignment >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < int Rank, typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueVoltaTensorOpAffineRankN { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, ElementAccumulator >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN< OutputTileThreadMap, ElementOutput, Rank >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8; static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B"); using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator, kSharedMemAlignment >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
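// Illustrative composition sketch (not part of the original header). The trait
// is pure type composition: given a warp-level Volta tensor-op MMA type
// (written here as `WarpMma` and assumed to be defined elsewhere, e.g. by the
// warp-level default-MMA traits), it assembles the complete threadblock
// epilogue in a single alias. The tile shape, output operator, and access
// width below are typical choices, not requirements.
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombination<
//       cutlass::half_t,            // element type of C/D
//       8,                          // elements per vectorized access (128b of half_t)
//       float,                      // accumulator element
//       float>;                     // compute element
//
//   using DefaultEpilogue = cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
//       cutlass::gemm::GemmShape<128, 128, 32>,   // threadblock tile
//       WarpMma,                                  // warp-level MMA (assumed)
//       /*PartitionsK=*/1,
//       OutputOp,
//       /*ElementsPerAccess=*/8>;
//
//   using Epilogue = typename DefaultEpilogue::Epilogue;
//
// The nested ::Epilogue is then constructed from its SharedStorage inside the
// kernel and invoked once per threadblock tile after the mainloop completes.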
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h", "repo_id": "cutlass", "token_count": 3440 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/array_planar_complex.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator for planar-complex output representations. /// /// Note, as with most CUTLASS components for planar complex, the template arguments describe /// the underlying real data type. 
template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator typename Padding_ ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) > class EpiloguePlanarComplex { public: using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; /// Output layout is always row-major using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = ArrayPlanarComplex< typename WarpMmaOperator::FragmentC::Element, WarpMmaOperator::FragmentC::kElements >; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Shape of each warp-level operation using WarpShape = typename WarpMmaOperator::Shape; /// Number of warps using WarpCount = gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, kPartitionsK >; /// Shared memory allocation struct SharedStorage { // // Type definitions // /// Element type of shared memory using Element = typename WarpTileIterator::Element; /// Tensor reference to shared memory allocation using TensorRef = typename WarpTileIterator::TensorRef; /// Layout of shared memory allocation using Layout = typename WarpTileIterator::Layout; /// Logical shape of the shared memory tile written to by all warps. 
using Shape = MatrixShape< WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK, WarpCount::kN * WarpTileIterator::Shape::kColumn >; /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape< Shape::kRow + Padding::kRow, Shape::kColumn + Padding::kColumn >; static int const kImaginaryStride = StorageShape::kCount; // // Data members // AlignedBuffer<Element, kImaginaryStride * 2> storage; // // Methods // /// Returns a pointer to the shared memory buffer CUTLASS_DEVICE Element *data() { return storage.data(); } /// Returns a tensor reference to the shared memory buffer CUTLASS_DEVICE TensorRef reference() { return TensorRef( storage.data(), Layout::packed({StorageShape::kRow, StorageShape::kColumn})); } }; private: // // Data members // SharedStorage &shared_storage_; /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Stores a warp's fragment of accumulators to SMEM WarpTileIterator warp_tile_iterator_; public: /// Constructor CUTLASS_DEVICE EpiloguePlanarComplex( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): shared_storage_(shared_storage), shared_load_iterator_(shared_storage.reference(), thread_idx), warp_tile_iterator_(shared_storage.reference(), lane_idx) { // Compute warp location within threadblock tile by mapping the warp_id to three coordinates: // // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN); int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN); int warp_m = warp_mn % WarpCount::kM; int warp_n = warp_mn / WarpCount::kM; MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n}; warp_tile_iterator_.add_tile_offset(warp_offset); } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator_real, ///< Tile iterator for destination OutputTileIterator destination_iterator_imag, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator_real, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) OutputTileIterator source_iterator_imag) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) typename OutputTileIterator::Fragment source_fragment_real; typename OutputTileIterator::Fragment source_fragment_imag; if (!output_op.is_source_needed()) { source_iterator_real.clear_mask(); source_iterator_imag.clear_mask(); } source_fragment_real.clear(); source_fragment_imag.clear(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator_real(accumulators.real); AccumulatorFragmentIterator accum_fragment_iterator_imag(accumulators.imag); // // Iterate over accumulator tile // CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // source_iterator_real.load(source_fragment_real); source_iterator_imag.load(source_fragment_imag); ++source_iterator_real; ++source_iterator_imag; // // Convert and store fragment // __syncthreads(); typename 
AccumulatorFragmentIterator::Fragment accum_fragment_real; typename AccumulatorFragmentIterator::Fragment accum_fragment_imag; accum_fragment_iterator_real.load(accum_fragment_real); accum_fragment_iterator_imag.load(accum_fragment_imag); ++accum_fragment_iterator_real; ++accum_fragment_iterator_imag; this->warp_tile_iterator_.store(accum_fragment_real); this->warp_tile_iterator_.store_with_pointer_offset(accum_fragment_imag, SharedStorage::kImaginaryStride); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment_real[kPartitionsK]; typename SharedLoadIterator::Fragment aligned_accum_fragment_imag[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment_real[0]); shared_load_iterator_.load_with_pointer_offset(aligned_accum_fragment_imag[0], SharedStorage::kImaginaryStride); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices static_assert(kPartitionsK == 1, "Sliced-K not supported for planar complex at this time"); // // Compute the output result // typename OutputTileIterator::Fragment output_fragment_real; typename OutputTileIterator::Fragment output_fragment_imag; apply_output_operator_( output_fragment_real, output_fragment_imag, output_op, aligned_accum_fragment_real[0], aligned_accum_fragment_imag[0], source_fragment_real, source_fragment_imag); // // Store the final result // destination_iterator_real.store(output_fragment_real); destination_iterator_imag.store(output_fragment_imag); ++destination_iterator_real; ++destination_iterator_imag; } } private: /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_( typename OutputTileIterator::Fragment &output_fragment_real, typename OutputTileIterator::Fragment &output_fragment_imag, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment_real, typename SharedLoadIterator::Fragment const &aligned_accum_fragment_imag, typename OutputTileIterator::Fragment const &source_fragment_real, typename OutputTileIterator::Fragment const &source_fragment_imag) { OutputAccessType *output_frag_real_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment_real); OutputAccessType *output_frag_imag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment_imag); AccumulatorAccessType const *compute_frag_real_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_real); AccumulatorAccessType const *compute_frag_imag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_imag); OutputAccessType const *source_frag_real_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment_real); OutputAccessType const *source_frag_imag_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment_imag); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator auto result_fragment = output_op( make_ArrayPlanarComplex(compute_frag_real_ptr[i], compute_frag_imag_ptr[i]), make_ArrayPlanarComplex(source_frag_real_ptr[i], source_frag_imag_ptr[i]) ); output_frag_real_ptr[i] = result_fragment.real; output_frag_imag_ptr[i] = result_fragment.imag; } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass 
////////////////////////////////////////////////////////////////////////////////
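// Illustrative usage sketch (not part of the original header): the shape of the
// call a kernel makes after its mainloop. `Epilogue` stands for any concrete
// instantiation of EpiloguePlanarComplex; the iterators and output functor are
// assumed to have been constructed from kernel parameters elsewhere, so only
// the interface defined above is used.
#include "cutlass/cutlass.h"

template <typename Epilogue>
CUTLASS_DEVICE void run_planar_complex_epilogue_sketch(
    typename Epilogue::SharedStorage &shared_storage,
    typename Epilogue::OutputOp const &output_op,
    typename Epilogue::OutputTileIterator iterator_D_real,
    typename Epilogue::OutputTileIterator iterator_D_imag,
    typename Epilogue::AccumulatorTile const &accumulators,
    typename Epilogue::OutputTileIterator iterator_C_real,
    typename Epilogue::OutputTileIterator iterator_C_imag,
    int thread_idx, int warp_idx, int lane_idx) {

  // One epilogue object per threadblock; it partitions work internally by warp and lane.
  Epilogue epilogue(shared_storage, thread_idx, warp_idx, lane_idx);

  // Streams the real and imaginary planes of the accumulator tile through
  // shared memory and out to the two destination tensors.
  epilogue(output_op, iterator_D_real, iterator_D_imag, accumulators,
           iterator_C_real, iterator_C_imag);
}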
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_planar_complex.h", "repo_id": "cutlass", "token_count": 4937 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/tensor_ref.h" namespace cutlass { namespace epilogue { namespace threadblock { template< typename TensorLayout_, ///! The original output tensor layout typename OutputIteratorLayout_, ///! Layout used by epilogue output iterator typename TensorRef_, ///! Input tensor to epilogue output iterator conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem > struct ConvOutputIteratorParameter { using TensorLayout = TensorLayout_; using OutputIteratorLayout = OutputIteratorLayout_; using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord; using TensorRef = TensorRef_; static conv::Operator const kConvolutionalOperator = ConvOperator; using ConvProblemSize = ConvProblemSize_; /// Wgrad stride idx for implicit gemm algorithm // Conv2d row-major matrix (KxRSC) // Conv3d row-major matrix (KxTRSC) static int const kWgradStrideIdx = platform::is_same<TensorLayout, layout::TensorNHWC>::value ? 2 : 3; /// This chooses the appropriate stride element of the C tensor. static int const kTensorStrideIdx = (kConvolutionalOperator == conv::Operator::kWgrad ? 
kWgradStrideIdx : 0); CUTLASS_HOST_DEVICE static OutputIteratorLayout layout(const TensorRef & ref) { return ref.stride(kTensorStrideIdx); } CUTLASS_HOST_DEVICE static OutputTensorCoord extent(ConvProblemSize problem_size) { return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(); } }; template< typename TensorRef_, ///! Input tensor to epilogue output iterator typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem > struct ConvOutputIteratorParameter<layout::TensorNHWC, layout::TensorNHWC, TensorRef_, conv::Operator::kFprop, ConvProblemSize_> { using TensorLayout = layout::TensorNHWC; using OutputIteratorLayout = layout::TensorNHWC; using MappedLayout = layout::RowMajor; using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord; using MappedTensorCoord = typename MappedLayout::TensorCoord; using TensorRef = TensorRef_; static conv::Operator const kConvolutionalOperator = conv::Operator::kFprop; using ConvProblemSize = ConvProblemSize_; CUTLASS_HOST_DEVICE static OutputIteratorLayout layout(const TensorRef & ref) { return ref.stride(); } CUTLASS_HOST_DEVICE static MappedTensorCoord extent(ConvProblemSize problem_size) { return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(); } }; template< typename TensorRef_, ///! Input tensor to epilogue output iterator typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem > struct ConvOutputIteratorParameter<layout::TensorNDHWC, layout::TensorNDHWC, TensorRef_, conv::Operator::kFprop, ConvProblemSize_> { using TensorLayout = layout::TensorNDHWC; using OutputIteratorLayout = layout::TensorNDHWC; using MappedLayout = layout::RowMajor; using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord; using MappedTensorCoord = typename MappedLayout::TensorCoord; using TensorRef = TensorRef_; static conv::Operator const kConvolutionalOperator = conv::Operator::kFprop; using ConvProblemSize = ConvProblemSize_; CUTLASS_HOST_DEVICE static OutputIteratorLayout layout(const TensorRef & ref) { return ref.stride(); } CUTLASS_HOST_DEVICE static MappedTensorCoord extent(ConvProblemSize problem_size) { return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(); } }; template < int InterleavedK, typename TensorRef_, conv::Operator ConvOperator, typename ConvProblemSize_ > struct ConvOutputIteratorParameter< layout::TensorNCxHWx<InterleavedK>, layout::TensorNCxHWx<InterleavedK>, TensorRef_, ConvOperator, ConvProblemSize_> { using TensorLayout = typename layout::TensorNCxHWx<InterleavedK>; using OutputIteratorLayout = typename layout::TensorNCxHWx<InterleavedK>; using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord; using TensorRef = TensorRef_; static conv::Operator const kConvolutionalOperator = ConvOperator; using ConvProblemSize = ConvProblemSize_; CUTLASS_HOST_DEVICE static OutputIteratorLayout layout(const TensorRef & ref) { return ref.stride(); } CUTLASS_HOST_DEVICE static OutputTensorCoord extent(ConvProblemSize problem_size) { return problem_size.output_extent(); } }; } // namespace threadblock } // namespace epilogue } // namespace cutlass
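// Illustrative usage sketch (not part of the original header): maps a small
// Conv2d Fprop problem onto the extent and layout the epilogue's output
// iterator consumes. Assumes the CUTLASS include directory is on the include
// path; the problem dimensions are arbitrary.
#include <iostream>
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_ref.h"

int main() {
  // N=1, H=W=8, C=32 input; K=64 filters of 3x3; padding 1, unit stride and dilation.
  cutlass::conv::Conv2dProblemSize problem_size(
      {1, 8, 8, 32},                  // input size  (NHWC)
      {64, 3, 3, 32},                 // filter size (KRSC)
      {1, 1, 1, 1},                   // padding
      {1, 1},                         // conv stride
      {1, 1});                        // dilation

  using Layout = cutlass::layout::TensorNHWC;
  using OutputIteratorParameter = cutlass::epilogue::threadblock::ConvOutputIteratorParameter<
      Layout,                         // original output tensor layout
      Layout,                         // layout used by the epilogue output iterator
      cutlass::TensorRef<float, Layout>,
      cutlass::conv::Operator::kFprop,
      cutlass::conv::Conv2dProblemSize>;

  // For Fprop the implicit-GEMM output extent is (N*P*Q) x K; here P = Q = 8,
  // so the epilogue sees a 64 x 64 logical matrix.
  auto extent = OutputIteratorParameter::extent(problem_size);
  std::cout << "GEMM extent: " << extent.row() << " x " << extent.column() << "\n";

  // The iterator's layout is simply the NHWC stride of the output tensor D.
  Layout layout_D = Layout::packed({1, 8, 8, 64});        // N x P x Q x K
  cutlass::TensorRef<float, Layout> ref_D(nullptr, layout_D);
  Layout iterator_layout = OutputIteratorParameter::layout(ref_D);
  std::cout << "stride between W positions: " << iterator_layout.stride()[0] << "\n";
  return 0;
}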
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/output_iterator_parameter.h", "repo_id": "cutlass", "token_count": 2284 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile that participate in one warp-level store operation. Typically, the accumulator tile is the largest single block of register-backed storage within the kernel. Storing it to memory is best accomplished by partitioning it into smaller tiles and storing these sequentially. Round trips through shared memory during the Epilogue phase require partitioning, as shared memory capacity is typically insufficient for a threadblock's total accumulator size. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/epilogue/warp/simt_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Fragment iterator for SIMT accumulator arrangements template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename Operator, ///< matrix multiply operation (concept: arch::Mma) typename Layout, ///< target shared memory layout typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class FragmentIteratorSimt; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_, ///< shape of the warp-level GEMM tile typename Operator_ , ///< matrix multiply operator (concept: arch::Mma) typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class FragmentIteratorSimt<WarpShape_, Operator_, layout::RowMajor, MmaSimtPolicy_> { public: using WarpShape = WarpShape_; using Operator = Operator_; using Layout = layout::RowMajor; /// Policy for warp-level epilogue components using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< typename Operator::ElementC, Policy::kElementsPerIteration>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = Array< typename Operator::ElementC, Policy::kAccumulatorElementCount>; using OutputAccumulatorTile = AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; private: /// Internal access type using AccessType = Array<typename Operator::ElementC, Policy::kElementsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorSimt(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorSimt &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorSimt &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::kAccessesPerIteration; ++n) { int accumulator_access_offset = index_ * Policy::kAccessesPerIteration + n; frag_ptr[n] = accumulators_[accumulator_access_offset]; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
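// Illustrative usage sketch (not part of the original header): the pattern an
// epilogue uses to drain a warp's accumulator tile through this iterator. The
// concrete iterator type is left as a template parameter, and the warp-level
// store it would feed is only indicated by a comment, so nothing beyond the
// interface defined above is assumed.
#include "cutlass/cutlass.h"

template <typename FragmentIterator>
CUTLASS_DEVICE void drain_accumulators_sketch(
    typename FragmentIterator::AccumulatorTile const &accumulators) {

  // Wrap the register-backed accumulator tile.
  FragmentIterator frag_iterator(accumulators);

  CUTLASS_PRAGMA_UNROLL
  for (int iter = 0; iter < FragmentIterator::kIterations; ++iter, ++frag_iterator) {
    // One store-sized piece of the accumulator tile.
    typename FragmentIterator::Fragment frag;
    frag_iterator.load(frag);

    // ... hand `frag` to a warp tile iterator's store() and round-trip through
    //     shared memory here ...
  }
}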
{ "file_path": "cutlass/include/cutlass/epilogue/warp/fragment_iterator_simt.h", "repo_id": "cutlass", "token_count": 1642 }
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Define basic numeric operators This is inspired by the Standard Library's <functional> header. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include <cuda_runtime.h> #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include <mma.h> #endif // defined(CUTLASS_ARCH_WMMA_ENABLED) #ifdef _MSC_VER // Provides support for alternate operators such as 'and', 'or', ... 
#include <iso646.h> #endif // _MSC_VER namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct absolute_value_op { CUTLASS_HOST_DEVICE T operator()(T lhs) const { return abs(lhs); } }; template <> struct absolute_value_op<float> { CUTLASS_HOST_DEVICE float operator()(float lhs) const { return fabs(lhs); } }; template <typename T> struct plus { CUTLASS_HOST_DEVICE T operator()(T lhs, T const &rhs) const { lhs += rhs; return lhs; } }; template <typename T> struct minus { CUTLASS_HOST_DEVICE T operator()(T lhs, T const &rhs) const { lhs -= rhs; return lhs; } }; template <typename T> struct multiplies { CUTLASS_HOST_DEVICE T operator()(T lhs, T const &rhs) const { lhs *= rhs; return lhs; } }; template <typename T> struct scale { T const scaling_factor_; CUTLASS_HOST_DEVICE scale(float scaling_factor) : scaling_factor_(scaling_factor) { } T operator()(T const &rhs) const { T result = rhs * scaling_factor_; return result; } }; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 /// Partial specializations needed when __CUDA_NO_HALF2_OPERATORS__ is set template<> struct plus<__half2> { CUTLASS_HOST_DEVICE __half2 operator()(__half2 lhs, __half2 const &rhs) const { return __hadd2(lhs, rhs); } }; template<> struct minus<__half2> { CUTLASS_HOST_DEVICE __half2 operator()(__half2 lhs, __half2 const &rhs) const { return __hsub2(lhs, rhs); } }; template<> struct multiplies<__half2> { CUTLASS_HOST_DEVICE __half2 operator()(__half2 lhs, __half2 const &rhs) const { return __hmul2(lhs, rhs); } }; /// Partial specializations needed when __CUDA_NO_HALF_OPERATORS__ is set template<> struct plus<__half> { CUTLASS_HOST_DEVICE __half operator()(__half lhs, __half const &rhs) const { return __hadd(lhs, rhs); } }; template<> struct minus<__half> { CUTLASS_HOST_DEVICE __half operator()(__half lhs, __half const &rhs) const { return __hsub(lhs, rhs); } }; template<> struct multiplies<__half> { CUTLASS_HOST_DEVICE __half operator()(__half lhs, __half const &rhs) const { return __hmul(lhs, rhs); } }; #endif // defined(__CUDA_ARCH__) /// Squares with optional conversion template <typename T, typename Output = T> struct square { CUTLASS_HOST_DEVICE Output operator()(T lhs) const { multiplies<Output> mul_op; Output y = Output(lhs); return mul_op(y, y); } }; /// Returns the magnitude squared of an element. 
template <typename T, typename Output = T> struct magnitude_squared { CUTLASS_HOST_DEVICE Output operator()(T lhs) const { multiplies<Output> mul_op; Output y = Output(lhs); return mul_op(y, y); } }; /// Computes the square of a difference with optional conversion template <typename T, typename Output = T> struct square_difference { CUTLASS_HOST_DEVICE Output operator()(T lhs, T rhs) const { multiplies<Output> mul_op; Output y = Output(lhs) - Output(rhs); return mul_op(y, y); } }; /// Computes the square of a difference with optional conversion template <typename T, typename Output = T> struct magnitude_squared_difference { CUTLASS_HOST_DEVICE Output operator()(T lhs, T rhs) const { multiplies<Output> mul_op; Output y = Output(lhs) - Output(rhs); return mul_op(y, y); } }; // Computes the reciprocal square root template <typename T> struct inverse_square_root; template <> struct inverse_square_root<float> { CUTLASS_HOST_DEVICE float operator()(float const &lhs) const { #if defined(__CUDA_ARCH__) return rsqrtf(lhs); #else return 1.f / std::sqrt(lhs); #endif } }; template <> struct inverse_square_root<half_t> { CUTLASS_HOST_DEVICE half_t operator()(half_t const &lhs) const { #if defined(__CUDA_ARCH__) auto result = hrsqrt(reinterpret_cast<__half const &>(lhs)); return reinterpret_cast<half_t const &>(result); #else return half_t(1.f / std::sqrt(half_t::convert(lhs))); #endif } }; /// Divides template <typename T> struct divides { CUTLASS_HOST_DEVICE T operator()(T lhs, T const &rhs) const { lhs /= rhs; return lhs; } }; /// reciprocal_approximate template <typename T> struct reciprocal_approximate { CUTLASS_HOST_DEVICE T operator()(T lhs) const { return divides<T>{}(T(1), lhs); } }; template <> struct reciprocal_approximate <float> { CUTLASS_HOST_DEVICE float operator()(float lhs) const { float ret; ret = 1.0f / lhs; return ret; } }; /// Negate template <typename T> struct negate { CUTLASS_HOST_DEVICE T operator()(T lhs) const { return -lhs; } }; /// Greater equal template <typename T> struct greater_equal { CUTLASS_HOST_DEVICE bool operator()(T const &lhs, T const &rhs) const { return (lhs >= rhs); } }; /// Greater template <typename T> struct greater { CUTLASS_HOST_DEVICE bool operator()(T const &lhs, T const &rhs) const { return (lhs > rhs); } }; /// Less equal template <typename T> struct less_equal { CUTLASS_HOST_DEVICE bool operator()(T const &lhs, T const &rhs) const { return (lhs <= rhs); } }; /// Less template <typename T> struct less { CUTLASS_HOST_DEVICE bool operator()(T const &lhs, T const &rhs) const { return (lhs < rhs); } }; template <typename T, bool PropagateNaN = false> struct maximum { CUTLASS_HOST_DEVICE T operator()(T const &lhs, T const &rhs) const { return (lhs < rhs ? rhs : lhs); } }; // This is a subclass and not an alias // in order to work around a known Clang issue, // where a template template parameter with one template parameter // does not match classes that take multiple template parameters // but have defaults for all but the first. template<typename T> struct maximum_with_default_nan_propagation : public maximum<T> {}; // Maximum with nan propagation // To propagate NANs, the "max" of a two element that contains NaNs should also return a NaN template <typename T> struct maximum<T, true> { CUTLASS_HOST_DEVICE T operator()(T const &lhs, T const &rhs) const { #if defined(__CUDA_ARCH__) return lhs > rhs or isnan(lhs) ? lhs : rhs; #else return lhs > rhs or std::isnan(lhs) ? 
lhs : rhs; #endif } }; template <> struct maximum<float, false> { CUTLASS_HOST_DEVICE float operator()(float const &lhs, float const &rhs) const { return fmaxf(lhs, rhs); } }; template <> struct maximum<float, true> { CUTLASS_HOST_DEVICE float operator()(float const lhs, float const rhs) const { float res; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) asm volatile("max.NaN.f32 %0, %1, %2;\n" : "=f"(res) : "f"(lhs), "f"(rhs)); #elif defined(__CUDA_ARCH__) res = lhs > rhs or isnan(lhs) ? lhs : rhs; #else res = lhs > rhs or std::isnan(lhs) ? lhs : rhs; #endif return res; } }; // This is a subclass and not an alias // in order to work around a known Clang issue, // where a template template parameter with one template parameter // does not match classes that take multiple template parameters // but have defaults for all but the first. template <typename T> struct maximum_with_nan_propagation : maximum<T, true> {}; // This alias exists for backwards compatibility only. // Please use the correctly spelled class template above. template <typename T> using maximum_with_nan_propogation = maximum_with_nan_propagation<T>; template <typename T, bool PropagateNaN = false> struct minimum{ CUTLASS_HOST_DEVICE T operator()(T const &lhs, T const &rhs) const { return (rhs < lhs ? rhs : lhs); } }; template <typename T> struct minimum<T, true> { CUTLASS_HOST_DEVICE T operator()(T const &lhs, T const &rhs) const { #if defined(__CUDA_ARCH__) return lhs < rhs or isnan(lhs) ? lhs : rhs; #else return lhs < rhs or std::isnan(lhs) ? lhs : rhs; #endif } }; template <> struct minimum<float, false> { CUTLASS_HOST_DEVICE float operator()(float const &lhs, float const &rhs) const { return fminf(lhs, rhs); } }; template <typename T, bool PropagateNaN = false> struct maximum_absolute_value { CUTLASS_HOST_DEVICE float operator()(T const &lhs, T const &rhs) const { absolute_value_op<T> abs_op; maximum<T, PropagateNaN> max_op; return max_op(abs_op(lhs), abs_op(rhs)); } }; // assumes the left operand is already an absolute value template <typename T, bool PropagateNaN = false> struct maximum_absolute_value_reduction { CUTLASS_HOST_DEVICE float operator()(T const &lhs, T const &rhs) const { absolute_value_op<T> abs_op; maximum<T, PropagateNaN> max_op; return max_op(lhs, abs_op(rhs)); } }; /// Fused multiply-add template <typename A, typename B = A, typename C = A> struct multiply_add { CUTLASS_HOST_DEVICE C operator()(A const &a, B const &b, C const &c) const { return C(a) * C(b) + c; } }; template <typename T> struct square_and_plus { CUTLASS_HOST_DEVICE T operator()(T lhs, T const &rhs) const { multiply_add<T> multiply_add_op; return multiply_add_op(rhs, rhs, lhs); } }; // Fused multiply-add that takes exactly one template parameter. // This is useful for working around a known Clang issue, // where a template template parameter with one template parameter // does not match classes that take multiple template parameters // but have defaults for all but the first. 
template <typename A> struct homogeneous_multiply_add : public multiply_add<A, A, A> {}; /// Fused multiply-add template <typename A, typename B = A, typename C = A> struct multiply_add_relu0 { CUTLASS_HOST_DEVICE C operator()(A const &a, B const &b, C const &c) const { maximum<C> mx; return mx(C(a) * C(b) + c, C(0)); } }; /// Fused multiply-add template <typename T> struct and_add { CUTLASS_HOST_DEVICE T operator()(T const &a, T const &b, T const &c) const { return ((a & b) + c); } }; /// Fused multiply-add template <typename T> struct xor_add { CUTLASS_HOST_DEVICE T operator()(T const &a, T const &b, T const &c) const { return ((a ^ b) + c); } }; template <typename T> struct conjugate { CUTLASS_HOST_DEVICE T operator()(T const &a) const { return a; } }; template <typename T> struct first { CUTLASS_HOST_DEVICE T operator()(T const & first, T const &...) const { return first; } CUTLASS_HOST_DEVICE T operator()(T const & first) const { return first; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct logical_and { CUTLASS_HOST_DEVICE T operator()(T const &a, T const &b) const { return ((static_cast<bool>(a) && static_cast<bool>(b)) ? T(1) : T()); } }; template <typename T> struct logical_or { CUTLASS_HOST_DEVICE T operator()(T const &a, T const &b) const { return ((static_cast<bool>(a) || static_cast<bool>(b)) ? T(1) : T()); } }; template <typename T> struct logical_not { CUTLASS_HOST_DEVICE T operator()(T const &a) const { return T(!(a)); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct bit_and { CUTLASS_HOST_DEVICE T operator()(T const &a, T const &b) const { return a & b; } }; template <typename T> struct bit_or { CUTLASS_HOST_DEVICE T operator()(T const &a, T const &b) const { return a | b; } }; template <typename T> struct bit_not { CUTLASS_HOST_DEVICE T operator()(T const &a) const { return ~a; } }; template <typename T> struct bit_xor { CUTLASS_HOST_DEVICE T operator()(T const &a, T const &b) const { return a ^ b; } }; ////////////////////////////////////////////////////////////////////////////////////////////////// /// Atomic reductions template <typename T> struct atomic_add { CUTLASS_DEVICE void operator()(T *ptr, const T &data) { #if defined(__CUDA_ARCH__) atomicAdd(ptr, data); #endif } }; template<> struct atomic_add<double> { CUTLASS_DEVICE void operator()(double *ptr, const double &data) { #if !defined(__CUDA_ARCH__) CUTLASS_UNUSED(ptr); CUTLASS_UNUSED(data); #elif (__CUDA_ARCH__ >= 600) atomicAdd(ptr, data); #else // Use CAS loop unsigned long long int* ptr_int = reinterpret_cast<unsigned long long int*>(ptr); unsigned long long int old_int = *ptr_int; unsigned long long int assumed_int; do { double update = data + __longlong_as_double(old_int); assumed_int = old_int; old_int = atomicCAS(ptr_int, assumed_int, __double_as_longlong(update)); } while (assumed_int != old_int); #endif // (__CUDA_ARCH__ >= 600) } }; template<> struct atomic_add<half2> { CUTLASS_DEVICE void operator()(half2 *ptr, const half2 &data) { #if !defined(__CUDA_ARCH__) || (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600)) CUTLASS_UNUSED(ptr); CUTLASS_UNUSED(data); #else // Vector-2 atomic reduction requires .target sm_60 or higher uint32_t word = reinterpret_cast<const uint32_t&>(data); asm volatile ("red.gpu.global.add.noftz.f16x2 [%0], %1;\n" : : "l"(ptr), "r"(word)); #endif // (__CUDA_ARCH__ >= 600) } }; template <typename T> using red 
[[deprecated("use atomic_add instead")]] = atomic_add<T>; template <typename T> struct atomic_maximum { CUTLASS_DEVICE T operator()(T *ptr, T value) const { #if defined(__CUDA_ARCH__) return atomicMax(ptr, value); #else CUTLASS_UNUSED(ptr); CUTLASS_UNUSED(value); CUTLASS_NOT_IMPLEMENTED(); return 0; #endif } }; template <> struct atomic_maximum<float> { CUTLASS_DEVICE float operator()(float *ptr, float value) const { #if defined(__CUDA_ARCH__) return !signbit(value) ? __int_as_float(atomicMax((int*)ptr, __float_as_int(value))) : __uint_as_float(atomicMin((unsigned int*)ptr, __float_as_uint(value))); #else CUTLASS_UNUSED(ptr); CUTLASS_UNUSED(value); CUTLASS_NOT_IMPLEMENTED(); return 0; #endif } }; // is_atomic template <class Fn> struct is_atomic : platform::false_type {}; template <class T> struct is_atomic<atomic_add<T>> : platform::true_type {}; template <class T> struct is_atomic<atomic_maximum<T>> : platform::true_type {}; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations for nvcuda::wmma::fragment<Use, m, n, k, T, Layout> // ///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTLASS_ARCH_WMMA_ENABLED) template<typename Use, int m, int n, int k, typename T, typename Layout> struct plus<nvcuda::wmma::fragment<Use, m, n, k, T, Layout>> { using Fragment = nvcuda::wmma::fragment<Use, m, n, k, T, Layout>; using ElementType = typename Fragment::element_type; CUTLASS_HOST_DEVICE Fragment operator()(Fragment const &lhs, Fragment const &rhs) const { Fragment result; plus<ElementType> scalar_op; ElementType *result_elts = reinterpret_cast<ElementType*>(&result); const ElementType *lhs_elts = reinterpret_cast<const ElementType*>(&lhs); const ElementType *rhs_elts = reinterpret_cast<const ElementType*>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Fragment::num_elements; i++) { result_elts[i] = scalar_op(lhs_elts[i], rhs_elts[i]); } return result; } }; #endif // defined(CUTLASS_ARCH_WMMA_ENABLED) ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/functional.h/0
{ "file_path": "cutlass/include/cutlass/functional.h", "repo_id": "cutlass", "token_count": 6596 }
32
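Every functor in functional.h is an ordinary callable whose operator() is marked CUTLASS_HOST_DEVICE, so the same objects can be exercised from host code as from device code. The snippet below is a minimal host-side sketch, not taken from the CUTLASS sources, assuming the CUTLASS include/ directory and the CUDA toolkit headers are on the include path (compiling with nvcc is the simplest way to satisfy both).

#include <cassert>

#include "cutlass/functional.h"

int main() {
  cutlass::plus<float> add;
  cutlass::multiplies<float> mul;
  cutlass::multiply_add<float> mad;   // computes a * b + c
  cutlass::negate<float> neg;
  cutlass::minimum<int> min_op;

  assert(add(2.0f, 3.0f) == 5.0f);
  assert(mul(2.0f, 3.0f) == 6.0f);
  assert(mad(2.0f, 3.0f, 1.0f) == 7.0f);
  assert(neg(4.0f) == -4.0f);
  assert(min_op(7, 3) == 3);

  return 0;
}

These are the same functor objects that CUTLASS epilogues and reductions instantiate internally; one reason they exist instead of std::plus and friends is that they must be callable from device code.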
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Base device-level grouped kernel. 
*/ #pragma once #include <limits> #include <numeric> #include <vector> #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/gemm_universal.h" #include "cutlass/gemm/kernel/default_gemm_universal.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/trace.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM Grouped template <typename BaseKernel_> class BaseGrouped { public: using BaseKernel = BaseKernel_; using ElementA = typename BaseKernel::ElementA; using LayoutA = typename BaseKernel::LayoutA; using TensorRefA = TensorRef<ElementA const, LayoutA>; static ComplexTransform const kTransformA = BaseKernel::kTransformA; static int const kAlignmentA = BaseKernel::kAlignmentA; using ElementB = typename BaseKernel::ElementB; using LayoutB = typename BaseKernel::LayoutB; using TensorRefB = TensorRef<ElementB const, LayoutB>; static ComplexTransform const kTransformB = BaseKernel::kTransformB; static int const kAlignmentB = BaseKernel::kAlignmentB; using ElementC = typename BaseKernel::ElementC; using LayoutC = typename BaseKernel::LayoutC; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; static int const kAlignmentC = BaseKernel::kAlignmentC; using ElementAccumulator = typename BaseKernel::Mma::Policy::Operator::ElementC; using EpilogueOutputOp = typename BaseKernel::EpilogueOutputOp; using ThreadblockSwizzle = typename BaseKernel::ThreadblockSwizzle; using Operator = typename BaseKernel::Operator; using WarpMmaOperator = typename BaseKernel::Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename WarpMmaOperator::MathOperator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename BaseKernel::Mma::Shape; using WarpShape = typename BaseKernel::WarpShape; using InstructionShape = typename BaseKernel::InstructionShape; static int const kStages = BaseKernel::Mma::kStages; /// Argument structure using Arguments = typename BaseKernel::Arguments; using ProblemInfo = typename BaseKernel::ProblemVisitor::ProblemInfo; protected: /// Kernel parameters object typename BaseKernel::Params params_; private: /// Get the number of tiles across all problems in a group static int32_t group_tile_count(const cutlass::gemm::GemmCoord* problem_sizes_ptr, int problem_count) { int32_t tiles = 0; for (int32_t i = 0; i < problem_count; ++i) { cutlass::gemm::GemmCoord problem = problem_sizes_ptr[i]; BaseKernel::ProblemVisitor::possibly_transpose_problem(problem); tiles += problem_tile_count(problem); } return tiles; } /// Copy from `data` to `workspace` Status copy_to_workspace(void* workspace, void* data, size_t bytes) { cudaError_t cuda_error = cudaMemcpy(workspace, data, bytes, cudaMemcpyHostToDevice); if (cuda_error != cudaSuccess) { // Call cudaGetLastError() to clear the error bit cuda_error = cudaGetLastError(); CUTLASS_TRACE_HOST( " cudaMemcpy() returned error " << cudaGetErrorString(cuda_error)); return Status::kErrorInternal; } return Status::kSuccess; } /// Precomputes scheduling information 
for the grouped GEMM Status precompute(Arguments const &args, int32_t tile_count, void* workspace) { size_t workspace_bytes = get_workspace_size(args); std::vector<uint8_t> host_workspace(workspace_bytes); BaseKernel::ProblemVisitor::host_precompute(args.host_problem_sizes, args.problem_count, args.threadblock_count, (void*)host_workspace.data()); return copy_to_workspace(workspace, host_workspace.data(), workspace_bytes); } /// Reorder `data` according to `indices` template <typename T> static void reorder_array(T* data, const std::vector<size_t>& indices) { // For now, simply create a copy of the data and then copy over to the original. std::vector<T> copy(indices.size()); for (size_t i = 0; i < indices.size(); ++i) { copy.at(i) = data[indices[i]]; } memcpy(data, copy.data(), indices.size() * sizeof(T)); } public: /// Constructs the GEMM. BaseGrouped() { } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { return BaseKernel::can_implement(args); } /// Get the number of tiles in a problem static int32_t problem_tile_count(cutlass::gemm::GemmCoord const &problem) { auto grid = BaseKernel::ProblemVisitor::grid_shape(problem); return BaseKernel::ProblemVisitor::tile_count(grid); } /// Get the number of tiles across all problems in a group static int32_t group_tile_count(Arguments const &args) { if (args.host_problem_sizes == nullptr) { CUTLASS_TRACE_HOST("Received nullptr for `args.host_problem_sizes"); return -1; } return group_tile_count(args.host_problem_sizes, args.problem_count); } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) { return BaseKernel::ProblemVisitor::get_workspace_size(args.host_problem_sizes, args.problem_count, args.threadblock_count); } else { return 0; } } /// Computes the grid shape static dim3 get_grid_shape(Arguments const &args) { return dim3(args.threadblock_count, 1, 1); } /// Computes the maximum number of active blocks per multiprocessor static int maximum_active_blocks(int smem_capacity = -1) { CUTLASS_TRACE_HOST("BaseGrouped::maximum_active_blocks()"); int smem_size = int(sizeof(typename BaseKernel::SharedStorage)); CUTLASS_TRACE_HOST(" smem_size: " << smem_size << " bytes"); cudaError_t result; if (smem_size > (48 << 10)) { result = cudaFuncSetAttribute(Kernel<BaseKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { // Call cudaGetLastError() to clear the error bit result = cudaGetLastError(); CUTLASS_TRACE_HOST( " cudaFuncSetAttribute() returned error " << cudaGetErrorString(result)); return -1; } } int max_active_blocks = -1; result = cudaOccupancyMaxActiveBlocksPerMultiprocessor( &max_active_blocks, Kernel<BaseKernel>, BaseKernel::kThreadCount, smem_size); if (result != cudaSuccess) { // Call cudaGetLastError() to clear the error bit result = cudaGetLastError(); CUTLASS_TRACE_HOST( " cudaOccupancyMaxActiveBlocksPerMultiprocessor() returned error " << cudaGetErrorString(result)); return -1; } CUTLASS_TRACE_HOST(" max_active_blocks: " << max_active_blocks); return max_active_blocks; } /// Sorts each pointer passed in according to the indices that sort /// `problem_sizes_ptr` in descending order of problem-K dimension. 
static void sort_problems(int problem_count, cutlass::gemm::GemmCoord* problem_sizes_ptr, int64_t* lda_host_ptr, int64_t* ldb_host_ptr, int64_t* ldc_host_ptr, int64_t* ldd_host_ptr, int64_t* offset_A_ptr, int64_t* offset_B_ptr, int64_t* offset_C_ptr, int64_t* offset_D_ptr) { std::vector<size_t> indices(problem_count); std::iota(indices.begin(), indices.end(), 0); std::stable_sort(indices.begin(), indices.end(), [&problem_sizes_ptr](size_t i, size_t j) { return problem_sizes_ptr[i].k() > problem_sizes_ptr[j].k(); }); reorder_array(problem_sizes_ptr, indices); reorder_array(lda_host_ptr, indices); reorder_array(ldb_host_ptr, indices); reorder_array(ldc_host_ptr, indices); reorder_array(ldd_host_ptr, indices); reorder_array(offset_A_ptr, indices); reorder_array(offset_B_ptr, indices); reorder_array(offset_C_ptr, indices); reorder_array(offset_D_ptr, indices); } /// Computes the number of threadblocks to launch for the grouped kernel static int sufficient(const cutlass::gemm::GemmCoord* problem_sizes_ptr=nullptr, int problem_count=0, int available_sm_count=-1) { // Determine the number of blocks that would be launched to fill up a single // wave on the GPU with each SM having maximum occupancy. int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { // Call cudaGetLastError() to clear the error bit result = cudaGetLastError(); CUTLASS_TRACE_HOST(" cudaGetDevice() returned error " << cudaGetErrorString(result)); return 0; } int multiprocessor_count; result = cudaDeviceGetAttribute(&multiprocessor_count, cudaDevAttrMultiProcessorCount, device_idx); if (result != cudaSuccess) { CUTLASS_TRACE_HOST( " cudaDeviceGetAttribute() returned error " << cudaGetErrorString(result)); return 0; } bool override_sm_count = (available_sm_count < 0 || available_sm_count > multiprocessor_count); if (override_sm_count) { available_sm_count = multiprocessor_count; } int max_active_blocks = maximum_active_blocks(); if (max_active_blocks <= 0) { return 0; } int occupancy_based_block_count = available_sm_count * max_active_blocks; if (problem_sizes_ptr == nullptr || problem_count == 0) { return occupancy_based_block_count; } int total_tiles = group_tile_count(problem_sizes_ptr, problem_count); // If the group contains a single problem, launching the exact number of // threadblocks needed to cover the problem minimizes the work performed // per threadblock in finding the next tile to compute. We return total_tiles // unless the user has provided the SM count. if (problem_count == 1 && override_sm_count) { return total_tiles; } // Choose between the full wave of threadblocks and the tile count. If there // are fewer tiles in the group than threadblocks in the full wave, only // some threadblocks will be assigned tiles. Those threadblocks // which are not assigned tiles still need to perform the work of iterating through // problem sizes to determine that they have no work to do. This competes for cycles // with those threadblocks that are assigned tiles to compute. return std::min(total_tiles, occupancy_based_block_count); } /// Initializes GEMM state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { CUTLASS_TRACE_HOST("BaseGrouped::initialize() - workspace " << workspace << ", stream: " << (stream ? 
"non-null" : "null")); // Workspace size_t workspace_bytes = get_workspace_size(args); if (workspace_bytes && !workspace) { return Status::kErrorWorkspaceNull; } if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) { int32_t tile_count = group_tile_count(args); Status status = precompute(args, tile_count, workspace); if (status != Status::kSuccess) { return status; } params_ = typename BaseKernel::Params(args, workspace, tile_count); } else { params_ = typename BaseKernel::Params(args, workspace); } // Specify shared memory capacity for kernel. int smem_size = int(sizeof(typename BaseKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(Kernel<BaseKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { size_t workspace_bytes = get_workspace_size(args); if (workspace_bytes && !workspace) { return Status::kErrorWorkspaceNull; } if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) { int32_t tile_count = group_tile_count(args); Status status = precompute(args, tile_count, workspace); if (status != Status::kSuccess) { return status; } params_.update(args, workspace, tile_count); } else { params_.update(args, workspace); } return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { // // Configure grid and block dimensions // if (!params_.problem_visitor.problem_count) { return Status::kSuccess; } dim3 grid(params_.threadblock_count, 1, 1); dim3 block(BaseKernel::kThreadCount, 1, 1); int smem_size = int(sizeof(typename BaseKernel::SharedStorage)); // // Launch kernel // // Launch cutlass::Kernel<BaseKernel><<<grid, block, smem_size, stream>>>(params_); // // Query for errors // cudaError_t result = cudaGetLastError(); if (result != cudaSuccess) { CUTLASS_TRACE_HOST(" grid launch failed with error " << cudaGetErrorString(result)); return Status::kErrorInternal; } return Status::kSuccess; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Initializes and runs the kernel. Status operator()( Arguments const &args, void *workspace, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/device/base_grouped.h/0
{ "file_path": "cutlass/include/cutlass/gemm/device/base_grouped.h", "repo_id": "cutlass", "token_count": 6242 }
33
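BaseGrouped documents its host-side pieces individually (can_implement, get_workspace_size, initialize, run); the order in which a caller strings them together is easier to see in one place. The helper below is a hedged sketch rather than CUTLASS code: GroupedGemm stands for any device-level type built on BaseGrouped (for example an instantiation of cutlass::gemm::device::GemmGrouped), and the function name run_grouped_gemm is made up.

#include <cuda_runtime.h>

#include "cutlass/cutlass.h"

/// Hypothetical helper showing the canonical BaseGrouped call sequence.
template <typename GroupedGemm>
cutlass::Status run_grouped_gemm(typename GroupedGemm::Arguments const &args,
                                 cudaStream_t stream = nullptr) {

  // Reject problems the kernel cannot execute before allocating anything.
  if (GroupedGemm::can_implement(args) != cutlass::Status::kSuccess) {
    return cutlass::Status::kErrorNotSupported;
  }

  // Grouped kernels that precompute scheduling information need a device workspace.
  void *workspace = nullptr;
  size_t workspace_bytes = GroupedGemm::get_workspace_size(args);
  if (workspace_bytes != 0 && cudaMalloc(&workspace, workspace_bytes) != cudaSuccess) {
    return cutlass::Status::kErrorInternal;
  }

  GroupedGemm gemm_op;
  cutlass::Status status = gemm_op.initialize(args, workspace, stream);
  if (status == cutlass::Status::kSuccess) {
    status = gemm_op.run(stream);
  }

  cudaFree(workspace);
  return status;
}

Populating GroupedGemm::Arguments (problem sizes, pointer arrays, leading dimensions, and the threadblock count suggested by GroupedGemm::sufficient()) is kernel-specific and is left out of the sketch.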
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/gemm/threadblock/gemv.h" #include "cutlass/gemm/threadblock/default_gemv_core.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the ThreadBlock tile - concept: gemm::GemmShape<> typename ThreadBlockShape_, /// Size of the per-thread shape - concept: gemm::GemmShape<> typename ThreadShape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C/D matrix typename ElementCD_, /// Layout of C/D matrix (concept: MatrixLayout) typename LayoutCD_, /// Data type of the accumulator typename ElementAccumulator_ = ElementCD_> struct DefaultGemv { /// Shape of Threadblock-level matrix operation (concept: GemmShape) using ThreadBlockShape = ThreadBlockShape_; /// Shape of warp-level matrix operation (concept: GemmShape) using ThreadShape = ThreadShape_; /// Data type of multiplicand A using ElementA = ElementA_; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of multiplicand B using ElementB = ElementB_; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of accumulators using ElementAccumulator = ElementAccumulator_; /// Data type of accumulators (same as C/D) using LayoutAccumulator = LayoutCD_; /// Data type of input/output matrix C/D using ElementCD = ElementCD_; /// Layout of input/output matrix C/D using LayoutCD = LayoutCD_; // Define the core components using Core = typename 
cutlass::gemm::threadblock::DefaultGemvCore< ThreadBlockShape, ThreadShape, ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, LayoutAccumulator>; // Define the threadblock-scoped gemv using ThreadBlockGemv = cutlass::gemm::threadblock::Gemv<Core>; // Iterator for multiplicand A using IteratorA = typename ThreadBlockGemv::IteratorA; // Iterator for multiplicand B using IteratorB = typename ThreadBlockGemv::IteratorB; /// Policy for the iterator that reads/writes C/D using IteratorPolicyCD = typename platform::conditional< platform::is_same<LayoutCD, layout::RowMajor>::value, cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous< layout::PitchLinearShape<ThreadBlockShape::kN, ThreadBlockShape::kM>, Core::kThreadsPerN, ThreadShape::kN>, cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided< layout::PitchLinearShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, Core::kThreadsPerN, ThreadShape::kM>>::type; /// Iterator that reads/writes C/D using IteratorCD = cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, ElementCD, LayoutCD, 0, IteratorPolicyCD>; /// Fragment storage for C/D using FragmentCD = typename IteratorCD::Fragment; // Define the threadblock swizzle using ThreadBlockSwizzle = cutlass::gemm::threadblock::GemvBatchedStridedThreadblockDefaultSwizzle; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/kernel/default_gemv.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/default_gemv.h", "repo_id": "cutlass", "token_count": 1557 }
34
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > struct GemmArray { using Mma = Mma_; using Epilogue = Epilogue_; using OutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; int swizzle_log_tile; typename Mma::IteratorA::Params params_A; typename Mma::IteratorA::Element const * const * ptr_A; typename Mma::IteratorB::Params params_B; typename Mma::IteratorB::Element const * const * ptr_B; typename Epilogue::OutputTileIterator::Params params_C; typename Epilogue::OutputTileIterator::Element const * const * ptr_C; typename Epilogue::OutputTileIterator::Params params_D; typename Epilogue::OutputTileIterator::Element * const * ptr_D; int64_t stride_D; typename OutputOp::Params epilogue; int batch_count; int gemm_k_iterations; // // Methods // CUTLASS_HOST_DEVICE Params() : swizzle_log_tile(0) { } CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmCoord const & problem_size_, cutlass::gemm::GemmCoord const & grid_tiled_shape_, typename Mma::IteratorA::Element const * const * ptr_A_, typename Mma::IteratorA::Layout layout_A, typename Mma::IteratorB::Element const * const * ptr_B_, typename Mma::IteratorB::Layout layout_B, typename Epilogue::OutputTileIterator::Element const * const * ptr_C_, typename Epilogue::OutputTileIterator::Layout layout_C, typename Epilogue::OutputTileIterator::Element * const * ptr_D_, typename Epilogue::OutputTileIterator::Layout layout_D, typename OutputOp::Params epilogue_, int batch_count_ ): problem_size(problem_size_), grid_tiled_shape(grid_tiled_shape_), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(layout_A), ptr_A(ptr_A_), params_B(layout_B), ptr_B(ptr_B_), params_C(layout_C), ptr_C(ptr_C_), params_D(layout_D), ptr_D(ptr_D_), epilogue(epilogue_), batch_count(batch_count_), gemm_k_iterations((problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK) { } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE GemmArray() { } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } // Each CTA handles multiple batch indices to accommodate limited range of CUDA grid's Z dimension for (int batch_idx = threadblock_swizzle.get_batch_idx(); batch_idx < params.batch_count; batch_idx += gridDim.z) { // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, 0 }; cutlass::MatrixCoord tb_offset_B{ 0, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, const_cast<typename Mma::IteratorA::Element *>(params.ptr_A[batch_idx]), params.problem_size.mk(), thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( 
params.params_B, const_cast<typename Mma::IteratorB::Element *>(params.ptr_B[batch_idx]), params.problem_size.kn(), thread_idx, tb_offset_B); // // Main loop // // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // OutputOp output_op(params.epilogue); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); // Tile iterator writing to output tile typename Epilogue::OutputTileIterator iterator_C( params.params_C, const_cast<typename Epilogue::OutputTileIterator::Element *>(params.ptr_C[batch_idx]), params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to output tile typename Epilogue::OutputTileIterator iterator_D( params.params_D, params.ptr_D[batch_idx], params.problem_size.mn(), thread_idx, threadblock_offset ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // run efficient epilogue epilogue(output_op, iterator_D, accumulators, iterator_C); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/kernel/gemm_array.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/gemm_array.h", "repo_id": "cutlass", "token_count": 3219 }
35
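GemmArray is a kernel-level struct: it provides Params, SharedStorage, kThreadCount, and a device-side operator(), but no launch logic of its own. The sketch below is illustrative rather than CUTLASS code; it uses the cutlass::Kernel global-function wrapper from cutlass/device_kernel.h (the same entry point device-level adapters such as BaseGrouped launch), assumes a caller-supplied grid shape, and uses a made-up helper name. It must be compiled with nvcc since it contains a kernel launch.

#include <cuda_runtime.h>

#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"

/// Hypothetical launcher for a kernel-level GEMM such as GemmArray.
template <typename GemmKernel>
cudaError_t launch_gemm_kernel(typename GemmKernel::Params const &params,
                               dim3 grid, cudaStream_t stream = nullptr) {

  dim3 block(GemmKernel::kThreadCount, 1, 1);
  int smem_size = int(sizeof(typename GemmKernel::SharedStorage));

  // Large tiles may need an opt-in for more than 48 KB of dynamic shared memory.
  if (smem_size >= (48 << 10)) {
    cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<GemmKernel>,
                                              cudaFuncAttributeMaxDynamicSharedMemorySize,
                                              smem_size);
    if (result != cudaSuccess) {
      return result;
    }
  }

  cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params);
  return cudaGetLastError();
}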
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Gemm kernel with an epilogue defined under the epilogue visitor concept */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/kernel/gemm_universal.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// // Gemm that compute the epilogue visitor functor template < typename Mma, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > class GemmWithEpilogueVisitor: GemmUniversal<Mma,Epilogue, ThreadblockSwizzle_> { public: using ThreadblockSwizzle = ThreadblockSwizzle_; using Base = GemmUniversal<Mma,Epilogue, ThreadblockSwizzle>; using Base::Base; using FusionCallbacks = typename Epilogue::FusionCallbacks; using ElementA = typename Base::ElementA; using LayoutA = typename Base::LayoutA; using ElementB = typename Base::ElementB; using LayoutB = typename Base::LayoutB; using ElementC = typename Base::ElementC; using LayoutC = typename Base::LayoutC; using ThreadblockShape = typename Mma::Shape; // // Structures // using SharedStorage = typename Base::SharedStorage; using Arguments = typename Base::Arguments; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params : UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB> { using ParamsBase = UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB>; // // Data members // cute::Shape<int32_t,int32_t,int32_t> problem_shape; typename Mma::IteratorA::Params params_A; typename Mma::IteratorB::Params params_B; typename FusionCallbacks::Params output_op; void * ptr_A; void * ptr_B; int64_t batch_stride_A; int64_t batch_stride_B; int * ptr_gather_A_indices; int * ptr_gather_B_indices; // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : ParamsBase(args, device_sms, sm_occupancy), params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a), params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b), output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)), problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)), ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)) { // Raise error on unsupported modes assert(args.mode != GemmUniversalMode::kGemmSplitKParallel && "Sm80 EVT does not support SplitKParallel."); assert(!(args.mode == GemmUniversalMode::kGemm && this->grid_tiled_shape.k() > 1 ) && "Sm80 EVT does not support SplitKSerial."); assert(args.mode != GemmUniversalMode::kArray && "Sm80 EVT does not support Array Gemm."); } /// Lightweight update given a subset of arguments. 
void update(Arguments const &args) { CUTLASS_TRACE_HOST("GemmUniversalwithVisitor::Params::update()"); // Update input pointers ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; this->batch_stride_D = args.batch_stride_D; ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices); ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices); output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/); problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count); } }; public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmWithEpilogueVisitor op; op(params, shared_storage); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { ThreadblockSwizzle threadblock_swizzle; run_with_swizzle(params, shared_storage, threadblock_swizzle); } /// Executes one GEMM with an externally-provided swizzling function CUTLASS_DEVICE void run_with_swizzle(Params const &params, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) { cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. // if (params.mode == GemmUniversalMode::kGemm) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A, params.ptr_gather_A_indices); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B, params.ptr_gather_B_indices); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. 
int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); Epilogue epilogue( params.output_op, shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Execute the epilogue operator to update the destination tensor. epilogue(accumulators, threadblock_tile_offset, params.problem_shape, thread_idx); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h", "repo_id": "cutlass", "token_count": 3758 }
36
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/workspace.h" #include "cutlass/fast_math.h" #include "cutlass/kernel_hardware_info.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/reg_reconfig.h" #include "cutlass/arch/mma_sm90.h" #include "cutlass/epilogue/collective/detail.hpp" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/group_array_problem_shape.hpp" #include "cutlass/gemm/kernel/tile_scheduler.hpp" #include "cutlass/pipeline/pipeline.hpp" #include "cute/tensor.hpp" #include "cutlass/trace.h" /////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::kernel { /////////////////////////////////////////////////////////////////////////////// template < class ProblemShape_, class CollectiveMainloop_, class CollectiveEpilogue_, class TileScheduler_ > class GemmUniversal< ProblemShape_, CollectiveMainloop_, CollectiveEpilogue_, TileScheduler_, cute::enable_if_t<cute::is_base_of_v<KernelPtrArrayTmaWarpSpecializedCooperative, typename CollectiveMainloop_::DispatchPolicy::Schedule>> > { public: // // Type Aliases // using ProblemShape = ProblemShape_; static_assert(rank(typename ProblemShape::UnderlyingProblemShape{}) == 3 or rank(typename ProblemShape::UnderlyingProblemShape{}) == 4, "ProblemShape{} should be <M,N,K> or <M,N,K,L>"); // Mainloop derived types using CollectiveMainloop = CollectiveMainloop_; using TileShape = typename CollectiveMainloop::TileShape; using TiledMma = typename CollectiveMainloop::TiledMma; using ArchTag = typename CollectiveMainloop::ArchTag; using ElementA = typename CollectiveMainloop::ElementA; using StrideA = typename CollectiveMainloop::StrideA; using UnderlyingStrideA = 
typename CollectiveMainloop::UnderlyingStrideA; using ElementB = typename CollectiveMainloop::ElementB; using UnderlyingStrideB = typename CollectiveMainloop::UnderlyingStrideB; using StrideB = typename CollectiveMainloop::StrideB; using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; using Schedule = typename DispatchPolicy::Schedule; using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; using ClusterShape = typename DispatchPolicy::ClusterShape; using MainloopArguments = typename CollectiveMainloop::Arguments; using MainloopParams = typename CollectiveMainloop::Params; // Epilogue derived types using CollectiveEpilogue = CollectiveEpilogue_; using ElementC = typename CollectiveEpilogue::ElementC; using StrideC = typename CollectiveEpilogue::StrideC; using UnderlyingStrideC = typename CollectiveEpilogue::UnderlyingStrideC; using ElementD = typename CollectiveEpilogue::ElementD; using StrideD = typename CollectiveEpilogue::StrideD; using UnderlyingStrideD = typename CollectiveEpilogue::UnderlyingStrideD; using EpilogueArguments = typename CollectiveEpilogue::Arguments; using EpilogueParams = typename CollectiveEpilogue::Params; static_assert(ArchTag::kMinComputeCapability >= 90); static_assert(cute::is_void_v<TileScheduler_>, "Ptr-Array Cooperative and Grouped Gemm Cooperative kernel only supports the default scheduler."); static constexpr bool IsGroupedGemmKernel = !cute::is_same_v<UnderlyingStrideA, StrideA>; using TileScheduler = cute::conditional_t<IsGroupedGemmKernel, typename detail::TileSchedulerSelector< GroupScheduler, ArchTag, TileShape, ClusterShape, ProblemShape>::Scheduler, typename detail::TileSchedulerSelector< void, ArchTag, TileShape, ClusterShape>::Scheduler>; using TileSchedulerArguments = typename TileScheduler::Arguments; using TileSchedulerParams = typename TileScheduler::Params; static constexpr uint32_t NumLoadWarpGroups = 1; static constexpr uint32_t NumMmaWarpGroups = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup; static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMma{})) + (NumLoadWarpGroups * NumThreadsPerWarpGroup); static constexpr uint32_t MinBlocksPerMultiprocessor = 1; /// Register requirement for Load and Math WGs static constexpr uint32_t LoadRegisterRequirement = 40; static constexpr uint32_t MmaRegisterRequirement = 232; // 1 stage ordered sequence between mainloop and epilogue producer load threads using LoadWarpOrderBarrier = cutlass::OrderedSequenceBarrier<1,2>; // Kernel level shared memory storage struct SharedStorage { struct TensorStorage : cute::aligned_struct<128> { using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; MainloopTensorStorage mainloop; EpilogueTensorStorage epilogue; } tensors; struct PipelineStorage : cute::aligned_struct<16> { using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; alignas(16) MainloopPipelineStorage mainloop; alignas(16) EpiLoadPipelineStorage epi_load; alignas(16) typename LoadWarpOrderBarrier::SharedStorage load_order; } pipelines; struct TensorMapStorage : cute::aligned_struct<128> { using MainloopTensorMapStorage = typename CollectiveMainloop::TensorMapStorage; alignas(128) MainloopTensorMapStorage mainloop; } tensormaps; }; static constexpr int SharedStorageSize = sizeof(SharedStorage); // Device side arguments struct Arguments { GemmUniversalMode 
mode{}; ProblemShape problem_shape{}; MainloopArguments mainloop{}; EpilogueArguments epilogue{}; KernelHardwareInfo hw_info{}; TileSchedulerArguments scheduler{}; }; // Kernel entry point API struct Params { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopParams mainloop{}; EpilogueParams epilogue{}; KernelHardwareInfo hw_info{}; TileSchedulerParams scheduler{}; void* workspace{nullptr}; }; // // Methods // // Convert to underlying arguments. In this case, a simple copy for the aliased type. static Params to_underlying_arguments(Arguments const& args, void* workspace) { CUTLASS_TRACE_HOST("to_underlying_arguments():"); ProblemShape problem_shapes = args.problem_shape; // Get SM count if needed, otherwise use user supplied SM count int sm_count = args.hw_info.sm_count; if (sm_count <= 0) { CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id); } CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count); KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count}; // Calculate workspace pointers uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace); size_t workspace_offset = 0; void* scheduler_workspace = workspace_ptr; workspace_offset += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>( args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups); workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); void* epilogue_workspace = workspace_ptr + workspace_offset; workspace_offset += CollectiveEpilogue::get_workspace_size(problem_shapes, args.epilogue); workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); void* mainloop_workspace = workspace_ptr + workspace_offset; workspace_offset += CollectiveMainloop::get_workspace_size(problem_shapes, args.mainloop, args.hw_info.sm_count); workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); // Precompute the sub tiles numbers in epilogue, pass into tile scheduler. Therefore it will be used // in separate reduction scheme for streamk case, NumEpilogueSubTiles default value is 1, which means // subtile will not be used, therefore separate reduction will not be enabled. 
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{}); TileSchedulerParams scheduler; if constexpr (IsGroupedGemmKernel) { scheduler = TileScheduler::to_underlying_arguments( problem_shapes, TileShape{}, ClusterShape{}, hw_info, args.scheduler, scheduler_workspace, NumEpilogueSubTiles); } else { scheduler = TileScheduler::to_underlying_arguments( problem_shapes.get_host_problem_shape(), TileShape{}, ClusterShape{}, hw_info, args.scheduler, scheduler_workspace, NumEpilogueSubTiles); } return { args.mode, problem_shapes, CollectiveMainloop::to_underlying_arguments(problem_shapes, args.mainloop, mainloop_workspace), CollectiveEpilogue::to_underlying_arguments(problem_shapes, args.epilogue, epilogue_workspace), hw_info, scheduler, workspace }; } CUTLASS_HOST_DEVICE static bool can_implement(Arguments const& args) { bool implementable = true; if constexpr (IsGroupedGemmKernel) { // Group GEMM currently only supports rank-3 problem shapes implementable &= (args.mode == GemmUniversalMode::kGrouped && rank(typename ProblemShape::UnderlyingProblemShape{}) == 3); } else { implementable &= (args.mode == GemmUniversalMode::kArray && rank(typename ProblemShape::UnderlyingProblemShape{}) == 4); } if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements for Ptr Array Gemm or Grouped Gemm.\n"); return implementable; } implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); implementable &= TileScheduler::can_implement(args.scheduler); return implementable; } static size_t get_workspace_size(Arguments const& args) { size_t workspace_size = 0; constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{}); workspace_size += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>( args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles); workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment); workspace_size += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue); workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment); // Get SM count if needed, otherwise use user supplied SM count int sm_count = args.hw_info.sm_count; if (sm_count <= 0) { CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id); } workspace_size += CollectiveMainloop::get_workspace_size(args.problem_shape, args.mainloop, sm_count); workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment); return workspace_size; } static cutlass::Status initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter* cuda_adapter = nullptr) { Status status = Status::kSuccess; uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace); size_t workspace_offset = 0; constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{}); status = TileScheduler::template initialize_workspace<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>( args.scheduler, workspace_ptr + workspace_offset, stream, typename 
ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles); workspace_offset += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>( args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles); workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); if (status != Status::kSuccess) { return status; } status = CollectiveEpilogue::initialize_workspace(args.problem_shape, args.epilogue, workspace_ptr + workspace_offset, stream, cuda_adapter); workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue); workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); status = CollectiveMainloop::initialize_workspace(args.problem_shape, args.mainloop, workspace_ptr + workspace_offset, stream); workspace_offset += CollectiveMainloop::get_workspace_size(args.problem_shape, args.mainloop, args.hw_info.sm_count); workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); if (status != Status::kSuccess) { return status; } return status; } // Computes the kernel launch grid shape based on runtime parameters static dim3 get_grid_shape(Params const& params) { // Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently TileSchedulerArguments args{}; if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) { args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_; } args.raster_order = params.scheduler.raster_order_ == TileScheduler::RasterOrder::AlongN ? TileScheduler::RasterOrderOptions::AlongN : TileScheduler::RasterOrderOptions::AlongM; dim3 grid_shape; if constexpr (IsGroupedGemmKernel) { grid_shape = TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args); } else { grid_shape = TileScheduler::get_grid_shape(params.problem_shape.get_host_problem_shape(), TileShape{}, ClusterShape{}, params.hw_info, args); } return grid_shape; } static dim3 get_block_shape() { return dim3(MaxThreadsPerBlock, 1, 1); } CUTLASS_DEVICE void operator()(Params const& params, char* smem_buf) { using namespace cute; using X = Underscore; // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); #else // Preconditions static_assert(size(TiledMma{}) == 256, "Cooperative kernel must have TiledMMA operating using 256 threads."); static_assert(size<0>(TileShape{}) >= 128, "Cooperative kernel requires Tile Size to be greater than or equal to 128 along the M-dimension."); static_assert(cute::rank(UnderlyingStrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(UnderlyingStrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(UnderlyingStrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(UnderlyingStrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); /* In the Cooperative kernel, Consumer0 and Consumer1 collaborate on the same tile */ enum class WarpGroupRole { Producer = 0, Consumer0 = 1, Consumer1 = 2 }; enum class ProducerWarpRole { Mainloop = 0, Warp1 = 1, Epilogue = 2, Warp3 = 3 }; // Kernel level shared memory storage SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf); int thread_idx = int(threadIdx.x); int lane_idx = canonical_lane_idx(); int warp_idx = canonical_warp_idx_sync(); int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; int mma_thread_idx = thread_idx % size(TiledMma{}); auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); int lane_predicate = cute::elect_one_sync(); uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); // Note: Tma Descriptor Prefetch (from either const or param) is not applicable here // Mainloop Load pipeline using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; typename MainloopPipeline::Params mainloop_pipeline_params; if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Mainloop) { mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; } if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; } mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; mainloop_pipeline_params.num_consumers = size(TiledMma{}); mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params, ClusterShape{}); // Epilogue Load pipeline using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; typename EpiLoadPipeline::Params epi_load_pipeline_params; if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Epilogue) { epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; } if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; } epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; epi_load_pipeline_params.consumer_arv_count = size(TiledMma{}); epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); // Epilogue Store pipeline using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; typename EpiStorePipeline::Params epi_store_pipeline_params; epi_store_pipeline_params.always_wait = true; EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); typename LoadWarpOrderBarrier::Params params_load_order_barrier; params_load_order_barrier.group_id = producer_warp_role == ProducerWarpRole::Mainloop ? 
0 : 1; params_load_order_barrier.group_size = NumThreadsPerWarp; LoadWarpOrderBarrier load_order_barrier(shared_storage.pipelines.load_order, params_load_order_barrier); // Initialize starting pipeline states for the collectives // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; // For the DMA Load (producer) we start with an opposite phase // i.e., we skip all waits since we know that the buffer is indeed empty PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>(); PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>(); PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>(); auto cluster_wait_fn = [] () { // We need this to guarantee that the Pipeline init is visible // To all producers and consumer thread blocks in the Cluster if constexpr (size(ClusterShape{}) > 1) { cute::cluster_arrive_relaxed(); return [] () { cute::cluster_wait(); }; } else { __syncthreads(); return [] () {}; // do nothing } } (); // Get the appropriate blocks for this thread block -- potential for thread block locality TiledMma tiled_mma; auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) TileScheduler scheduler{params.scheduler}; auto work_tile_info = scheduler.get_current_work(); if (not work_tile_info.is_valid()) { return; } // Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK) auto problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{}); // In a warp specialized kernel, collectives expose data movement and compute operations separately CollectiveMainloop collective_mainloop; CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); // Prepare and partition the input tensors. Expects a tuple of tensors where: // get<0>(load_inputs) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l) // get<1>(load_inputs) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l) auto load_inputs = collective_mainloop.load_init(problem_shape_MNKL, params.mainloop); static_assert(cute::tuple_size_v<decltype(load_inputs)> >= 2, "Output of load_init must have at least two elements (A, B)"); // Extract out partitioned A and B. 
Tensor gA_mkl = get<0>(load_inputs); Tensor gB_nkl = get<1>(load_inputs); // Get pipeline stage increments from tensor shapes auto k_tile_count = size<3>(gA_mkl); // Wait for all thread blocks in the Cluster cluster_wait_fn(); if (warp_group_role == WarpGroupRole::Producer) { cutlass::arch::warpgroup_reg_dealloc<LoadRegisterRequirement>(); // Mainloop Producer Warp if (producer_warp_role == ProducerWarpRole::Mainloop) { int32_t curr_batch = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); // Usually just returns work_tile_info.L_idx; int32_t next_batch = curr_batch; int32_t const mock_l_coord = 0; int32_t const sm_idx = blockIdx.x + (blockIdx.y * gridDim.x); int32_t const sm_count = params.hw_info.sm_count; // Fetch a copy of tensormaps for the CTA auto input_tensormaps = collective_mainloop.tensormaps_init(params.mainloop, sm_count, sm_idx); // Update tensormap for the initial batch for the CTA if (work_tile_info.is_valid()) { collective_mainloop.tensormaps_perform_update( shared_storage.tensormaps.mainloop, params.mainloop, input_tensormaps, problem_shape_MNKL, next_batch ); // Ensure warp is converged before issuing tensor replace __syncwarp(); // Entire warp must do this (ie its aligned) collective_mainloop.tensormaps_cp_fence_release(shared_storage.tensormaps.mainloop, input_tensormaps); } bool do_load_order_arrive = true; while (work_tile_info.is_valid()) { if (!TileScheduler::valid_warpgroup_in_work_tile(work_tile_info)) { work_tile_info = fetch_next_work(work_tile_info, scheduler); continue; } // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); auto blk_coord = make_coord(m_coord, n_coord, _, mock_l_coord); // Get the number of K tiles to compute for this work as well as the starting K tile offset of the work. auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape); auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info); auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<3>(gA_mkl)), shape<3>(gA_mkl)); collective_mainloop.tensormaps_fence_acquire(input_tensormaps); collective_mainloop.load( params.mainloop, mainloop_pipeline, mainloop_pipe_producer_state, load_inputs, input_tensormaps, blk_coord, k_tile_iter, work_k_tile_count, lane_idx, block_rank_in_cluster, shared_storage.tensors.mainloop ); // Update starting pipeline state for the next tile // Wait for the last TMA stage to complete loading, before issuing tensormap updates mainloop_pipe_producer_state.advance(work_k_tile_count - 1); // Signal for the epilogue load warp to begin if (do_load_order_arrive) { load_order_barrier.arrive(); do_load_order_arrive = false; } // Get next work tile work_tile_info = fetch_next_work(work_tile_info, scheduler); next_batch = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); // Usually just returns work_tile_info.L_idx if (work_tile_info.is_valid() && next_batch != curr_batch ) { if constexpr (IsGroupedGemmKernel) { problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(next_batch), Int<1>{}); } // Purpose of this pipeline state is to make sure TMA loads have finished before doing descriptor updates // Since this state is waiting for loads to finish, it must start in the inverted phase. 
typename CollectiveMainloop::PipelineState mainloop_pipe_tma_consumer_state = {mainloop_pipe_producer_state.index(), !mainloop_pipe_producer_state.phase(), mainloop_pipe_producer_state.count()}; mainloop_pipeline.consumer_wait(mainloop_pipe_tma_consumer_state); collective_mainloop.tensormaps_perform_update( shared_storage.tensormaps.mainloop, params.mainloop, input_tensormaps, problem_shape_MNKL, next_batch ); // Ensure warp is converged before issuing tensor replace __syncwarp(); // Entire warp must do this (ie its aligned) collective_mainloop.tensormaps_cp_fence_release(shared_storage.tensormaps.mainloop, input_tensormaps); curr_batch = next_batch; } // Advance the producer state for the last remaining stage that was being waited for above mainloop_pipe_producer_state.advance(1); } // Scheduler work fetch loop // Make sure all Consumer Warp Groups have been waited upon collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); } // Mainloop Producer Warp End // Epilogue Producer Warp else if (producer_warp_role == ProducerWarpRole::Epilogue && collective_epilogue.is_producer_load_needed()) { while (work_tile_info.is_valid()) { if (!TileScheduler::requires_separate_reduction(params.scheduler)) { load_order_barrier.wait(); } if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) { // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); epi_load_pipe_producer_state = collective_epilogue.load( epi_load_pipeline, epi_load_pipe_producer_state, problem_shape_MNKL, blk_shape, blk_coord, tiled_mma, lane_idx, shared_storage.tensors.epilogue, work_tile_info.reduction_subtile_idx() ); } // Get next work tile work_tile_info = fetch_next_work(work_tile_info, scheduler); if constexpr (IsGroupedGemmKernel) { if (work_tile_info.is_valid()) { problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{}); } } } // Scheduler work fetch loop // Make sure all Consumer Warp Groups have been waited upon collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); } // Epilogue Producer Warp End } // Producer Warp Group End else if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { cutlass::arch::warpgroup_reg_alloc<MmaRegisterRequirement>(); // Do we potentially issue tail arrives for TMA stores, if epilogue load is waiting for it bool do_store_tail = false; while (work_tile_info.is_valid()) { // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape); // Allocate the accumulators for the (M,N) blk_shape // // MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead. 
auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) if(TileScheduler::valid_warpgroup_in_work_tile(work_tile_info)) { collective_mainloop.mma( mainloop_pipeline, mainloop_pipe_consumer_state, accumulators, work_k_tile_count, mma_thread_idx, shared_storage.tensors.mainloop, params.mainloop ); // Make sure the math instructions are done and free buffers before entering the epilogue collective_mainloop.mma_tail( mainloop_pipeline, mainloop_pipe_consumer_state, work_k_tile_count ); // Update starting mainloop pipeline state for the next tile mainloop_pipe_consumer_state.advance(work_k_tile_count); } // Index of warp group within consumer warp groups int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups; // Perform reduction across splits, if needed TileScheduler::fixup( params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx); if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) { // Epilogue and write to gD auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = collective_epilogue.store( epi_load_pipeline, epi_load_pipe_consumer_state, epi_store_pipeline, epi_store_pipe_producer_state, problem_shape_MNKL, blk_shape, blk_coord, accumulators, tiled_mma, mma_thread_idx, shared_storage.tensors.epilogue, work_tile_info.reduction_subtile_idx() ); epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next; epi_store_pipe_producer_state = epi_store_pipe_producer_state_next; do_store_tail = true; } // Get next work tile work_tile_info = fetch_next_work(work_tile_info, scheduler); if constexpr (IsGroupedGemmKernel) { if (work_tile_info.is_valid()) { problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{}); } } } // Scheduler work fetch loop if (do_store_tail) { collective_epilogue.store_tail( epi_load_pipeline, epi_load_pipe_consumer_state, epi_store_pipeline, epi_store_pipe_producer_state ); } } // Consumer Warp Groups End #endif } private: // Kernel helper function to get next work unit CUTLASS_DEVICE typename TileScheduler::WorkTileInfo fetch_next_work( typename TileScheduler::WorkTileInfo& work_tile_info, TileScheduler& scheduler) const { // Check whether we should continue on with the current work unit. If this is the case, // the work unit will have been updated in continue_current_work to reflect the new // tile to be computed. if (scheduler.continue_current_work(work_tile_info)) { return work_tile_info; } // Get next work tile scheduler.advance_to_next_work(); return scheduler.get_current_work(); } }; /////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::kernel
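// ---------------------------------------------------------------------------------------------
// Usage sketch (not part of the original header): a minimal host-side driver for a kernel with
// the interface above, assuming `GemmKernel` is a fully composed specialization of GemmUniversal
// and a 1x1x1 cluster shape. In practice this flow lives in
// cutlass::gemm::device::GemmUniversalAdapter, which additionally performs a cluster launch via
// cudaLaunchKernelEx when ClusterShape > 1; the helper name `run_ptr_array_gemm`, the cudaMalloc
// workspace allocation, and the plain <<<>>> launch below are illustrative simplifications only.
// ---------------------------------------------------------------------------------------------
#include <cuda_runtime.h>
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"

template <class GemmKernel>
cutlass::Status run_ptr_array_gemm(typename GemmKernel::Arguments const& args,
                                   cudaStream_t stream = nullptr) {
  // Reject modes / problem ranks the kernel cannot handle (see can_implement above)
  if (!GemmKernel::can_implement(args)) {
    return cutlass::Status::kErrorInvalidProblem;
  }

  // Query and initialize the scheduler / epilogue / mainloop workspace
  size_t workspace_bytes = GemmKernel::get_workspace_size(args);
  void* workspace = nullptr;
  if (workspace_bytes > 0 && cudaMalloc(&workspace, workspace_bytes) != cudaSuccess) {
    return cutlass::Status::kErrorMemoryAllocation;
  }
  if (GemmKernel::initialize_workspace(args, workspace, stream) != cutlass::Status::kSuccess) {
    return cutlass::Status::kErrorInternal;
  }

  // Translate the user-facing Arguments into kernel Params and launch the persistent grid
  typename GemmKernel::Params params = GemmKernel::to_underlying_arguments(args, workspace);
  dim3 grid  = GemmKernel::get_grid_shape(params);
  dim3 block = GemmKernel::get_block_shape();
  int  smem  = GemmKernel::SharedStorageSize;

  // Kernels of this size exceed the default 48 KB dynamic shared-memory limit
  cudaFuncSetAttribute(reinterpret_cast<void const*>(&cutlass::device_kernel<GemmKernel>),
                       cudaFuncAttributeMaxDynamicSharedMemorySize, smem);
  cutlass::device_kernel<GemmKernel><<<grid, block, smem, stream>>>(params);

  return (cudaGetLastError() == cudaSuccess) ? cutlass::Status::kSuccess
                                             : cutlass::Status::kErrorInternal;
}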
cutlass/include/cutlass/gemm/kernel/sm90_gemm_array_tma_warpspecialized_cooperative.hpp
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /*! 
\file \brief Utilities for selecting default tile schedulers */ #include "cutlass/detail/dependent_false.hpp" #include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" #include "cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp" #include "cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp" //////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm { //////////////////////////////////////////////////////////////////////////////// // // Tags for specifying tile schedulers // struct PersistentScheduler { }; struct StreamKScheduler { }; struct GroupScheduler { }; // Only used for Grouped GEMMs //////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm //////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::kernel::detail { // // Selectors mapping tile scheduler tag and arch tag to a tile scheduler class // template < class TileSchedulerTag, class ArchTag, class TileShape, class ClusterShape , class ProblemShapeType = void > struct TileSchedulerSelector { static_assert(cutlass::detail::dependent_false<ArchTag>, "Could not select a tile scheduler for given parameters."); }; template < class ArchTag, class TileShape, class ClusterShape > struct TileSchedulerSelector< PersistentScheduler, ArchTag, TileShape, ClusterShape > { using Scheduler = PersistentTileSchedulerSm90; }; // Default (void) for Sm90 maps to PersistentTileSchedulerSm90 template < class ArchTag, class TileShape, class ClusterShape > struct TileSchedulerSelector< void, ArchTag, TileShape, ClusterShape > { using Scheduler = typename TileSchedulerSelector< PersistentScheduler, ArchTag, TileShape, ClusterShape >::Scheduler; }; template < class TileShape, class ClusterShape > struct TileSchedulerSelector< StreamKScheduler, arch::Sm90, TileShape, ClusterShape > { using Scheduler = PersistentTileSchedulerSm90StreamK<TileShape, ClusterShape>; }; template < class TileShape, class ClusterShape , class GroupProblemShape > struct TileSchedulerSelector< GroupScheduler, arch::Sm90, TileShape, ClusterShape , GroupProblemShape > { using Scheduler = PersistentTileSchedulerSm90Group<GroupProblemShape>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::kernel::detail ////////////////////////////////////////////////////////////////////////////////
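// ---------------------------------------------------------------------------------------------
// Usage sketch (not part of the original header): how kernel-level code typically resolves a
// scheduler tag through TileSchedulerSelector. The tile and cluster shapes below are arbitrary
// examples, and the alias names Default_/StreamK_/Group_ are illustrative only.
// ---------------------------------------------------------------------------------------------
#include <type_traits>
#include "cute/tensor.hpp"
#include "cutlass/arch/arch.h"
#include "cutlass/gemm/group_array_problem_shape.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"

namespace example {

using TileShape    = cute::Shape<cute::_128, cute::_128, cute::_64>;
using ClusterShape = cute::Shape<cute::_2, cute::_1, cute::_1>;

// The void tag falls through to the persistent scheduler
using Default_ = typename cutlass::gemm::kernel::detail::TileSchedulerSelector<
    void, cutlass::arch::Sm90, TileShape, ClusterShape>::Scheduler;
static_assert(std::is_same_v<Default_, cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90>,
              "Default (void) tag selects the SM90 persistent scheduler");

// The Stream-K tag selects the stream-K scheduler specialized on the tile/cluster shape
using StreamK_ = typename cutlass::gemm::kernel::detail::TileSchedulerSelector<
    cutlass::gemm::StreamKScheduler, cutlass::arch::Sm90, TileShape, ClusterShape>::Scheduler;

// Grouped GEMM additionally passes its problem-shape type as the fifth parameter
using GroupProblemShape = cutlass::gemm::GroupProblemShape<cute::Shape<int, int, int>>;
using Group_ = typename cutlass::gemm::kernel::detail::TileSchedulerSelector<
    cutlass::gemm::GroupScheduler, cutlass::arch::Sm90, TileShape, ClusterShape,
    GroupProblemShape>::Scheduler;

} // namespace example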
cutlass/include/cutlass/gemm/kernel/tile_scheduler.hpp
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting simt instructions. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/threadblock/mma_pipelined.h" #include "cutlass/gemm/threadblock/mma_singlestage.h" #include "cutlass/arch/cache_operation.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { template < /// Shape of threadblock-scoped matrix multiply operator typename Shape, /// Shape of warp-level matrix multiply operator typename WarpShape, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape, /// Element data type of A operand typename ElementA, /// Layout of operand A typename LayoutA, /// Element data type of B operand typename ElementB, /// Layout of operand B typename LayoutB, /// Data type of accumulator typename ElementC, /// Layout of accumulator typename LayoutC, /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) typename OperatorClass, /// Size of a threadblock-scoped access int kAccessSizeInBits = -1, // -1 denoting the default /// Number of stages int Stages = 2, /// Operation performed by MMA typename Operator = typename platform::conditional< (platform::is_same<OperatorClass, cutlass::arch::OpClassTensorOp>::value) && (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value || platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value), cutlass::arch::OpMultiplyAddSaturate, cutlass::arch::OpMultiplyAdd>::type, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor = false, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA = cutlass::arch::CacheOperation::Global, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB = cutlass::arch::CacheOperation::Global, /// per-element transformation for elements of A ComplexTransform TransformA = ComplexTransform::kNone, /// per-element transformation for elements of B ComplexTransform TransformB = ComplexTransform::kNone, bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value) > struct DefaultMmaCoreWithAccessSize; template < /// Shape of threadblock-scoped matrix multiply operator typename Shape, /// Shape of warp-level matrix multiply operator typename WarpShape, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape, /// Element data type of A operand typename ElementA, /// Layout of operand A typename LayoutA, /// Element data type of B operand typename ElementB, /// Layout of operand B typename LayoutB, /// Data type of accumulator typename ElementC, /// Layout of accumulator typename LayoutC, /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) typename OperatorClass, /// Number of stages int Stages, /// Operation performed by MMA typename Operator, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. 
bool AccumulatorsInRowMajor, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// per-element transformation for elements of A ComplexTransform TransformA, /// per-element transformation for elements of B ComplexTransform TransformB, bool IsComplex > struct DefaultMmaCoreWithAccessSize< Shape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, OperatorClass, -1, Stages, Operator, AccumulatorsInRowMajor, CacheOpA, CacheOpB, TransformA, TransformB, IsComplex > : DefaultMmaCore< Shape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, OperatorClass, Stages, Operator, AccumulatorsInRowMajor, CacheOpA, CacheOpB, TransformA, TransformB, IsComplex > {}; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Size of a threadblock-scoped access (a value of -1 indicates the default) int kAccessSizeInBits_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCoreWithAccessSize<Shape_, WarpShape_, typename platform::enable_if<kAccessSizeInBits_ != -1, GemmShape<1, 1, 1>>::type, ElementA_, layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, kAccessSizeInBits_, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; static int const kElementsPerAccessDefault = 1; static_assert(kAccessSizeInBits_ == -1 || sizeof_bits<ElementA>::value == sizeof_bits<ElementB>::value || kAccessSizeInBits_ / sizeof_bits<ElementA>::value == kElementsPerAccessDefault, "Non-default value for kAccessSizeInBits_ is only allowed if size(elementA) == sizeof(elementB)"); static int const kElementsPerAccess = (kAccessSizeInBits_ != -1) ? 
kAccessSizeInBits_ / sizeof_bits<ElementA>::value : kElementsPerAccessDefault; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
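// ---------------------------------------------------------------------------------------------
// Usage sketch (not part of the original header): instantiating the SIMT core above with an
// explicit 128-bit access size. Tile sizes follow the classic SGEMM 128x128x8 / 32x64x8 tiling;
// the alias names and the exact include set are assumptions (the SIMT DefaultMmaCore machinery
// is pulled in via default_mma_core_simt.h), and the combination is a plausible example rather
// than a tuned or exhaustively validated configuration.
// ---------------------------------------------------------------------------------------------
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_with_access_size.h"

namespace example {

using Core = cutlass::gemm::threadblock::DefaultMmaCoreWithAccessSize<
    cutlass::gemm::GemmShape<128, 128, 8>,   // threadblock tile
    cutlass::gemm::GemmShape<32, 64, 8>,     // warp tile
    cutlass::gemm::GemmShape<1, 1, 1>,       // instruction shape (SIMT)
    float, cutlass::layout::ColumnMajor,     // A
    float, cutlass::layout::RowMajor,        // B
    float, cutlass::layout::RowMajor,        // C
    cutlass::arch::OpClassSimt,
    128>;                                    // kAccessSizeInBits: four floats per access

// These follow directly from the arithmetic in the partial specialization above
static_assert(Core::kElementsPerAccess == 4, "128-bit accesses of float carry four elements");
static_assert(Core::kThreads == 256, "4x2 warps of 32 threads");

} // namespace example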
cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a threadblock-scoped GEMV kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix-vector product using SIMT math instructions. 
template <
  class Core_  //< GemvCore
>
class Gemv {
public:
  using Shape = typename Core_::Shape;

  /// The MMA operator that computes GEMV
  using Operator = typename Core_::Operator;

  /// Iterates over A in global memory
  using IteratorA = typename Core_::IteratorA;

  /// Iterates over B in global memory
  using IteratorB = typename Core_::IteratorB;

  /// Iterates over C in global memory
  using IteratorC = typename Core_::IteratorC;

  /// Fragment of operand A loaded from global memory
  using FragmentA = typename IteratorA::Fragment;

  /// Fragment of operand B loaded from global memory
  using FragmentB = typename IteratorB::Fragment;

  /// Fragment of operand accumulator loaded/stored to global memory
  using FragmentC = typename Operator::FragmentC;

  /// Shape of the per-thread GEMV operation
  using ThreadShape = typename Core_::ThreadShape;

public:
  CUTLASS_DEVICE
  Gemv() { }

  CUTLASS_DEVICE
  void operator()(
    GemmCoord const &problem_size,    ///< problem size of batched GEMV
    FragmentC &accum,                 ///< destination accumulator tile
    IteratorA iterator_A,             ///< iterator over A operand in global memory
    IteratorB iterator_B,             ///< iterator over B operand in global memory
    FragmentC const &src_accum) {     ///< source accumulator tile

    //
    // Prologue
    //
    FragmentA frag_A;
    FragmentB frag_B;
    frag_A.clear();
    frag_B.clear();

    iterator_A.load(frag_A);
    iterator_B.load(frag_B);
    ++iterator_A;
    ++iterator_B;

    //
    // Mainloop
    //
    Operator thread_mma;

    int gemm_k = problem_size.k();

    if (gemm_k < Shape::kK) {
      iterator_A.clear_mask();
      iterator_B.clear_mask();
    }

    // iterate over K to accumulate result
    CUTLASS_GEMM_LOOP
    for (; gemm_k > 0; gemm_k -= Shape::kK) {
      thread_mma(accum, frag_A, frag_B, accum);

      iterator_A.load(frag_A);
      iterator_B.load(frag_B);
      ++iterator_A;
      ++iterator_B;

      if (gemm_k < Shape::kK) {
        iterator_A.clear_mask();
        iterator_B.clear_mask();
      }
    }
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace threadblock
} // namespace gemm
} // namespace cutlass
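// ---------------------------------------------------------------------------------------------
// Reference sketch (not part of the original header): a plain scalar model of the accumulation
// performed by the mainloop above. Fragments covering Shape::kK columns are consumed per step,
// and the final partial chunk is masked, which clear_mask() models by dropping out-of-range
// loads. This only illustrates the arithmetic and chunked loop structure, not the CUTLASS
// iterator/operator API; the function name and container choices are illustrative.
// ---------------------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

inline void gemv_chunked_reference(
    int M, int K, int chunk_k,
    std::vector<float> const& A,    // row-major, M x K
    std::vector<float> const& x,    // length K
    std::vector<float> const& src,  // length M, source accumulator (cf. src_accum)
    std::vector<float>& y) {        // length M, result
  for (int m = 0; m < M; ++m) {
    float acc = src[m];
    for (int k0 = 0; k0 < K; k0 += chunk_k) {
      // Residue handling: only the in-range part of the last chunk contributes
      int valid = std::min(chunk_k, K - k0);
      for (int kk = 0; kk < valid; ++kk) {
        acc += A[m * K + (k0 + kk)] * x[k0 + kk];
      }
    }
    y[m] = acc;
  }
}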
cutlass/include/cutlass/gemm/threadblock/gemv.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Implements streamk threadblock mapping blockIdx to GEMM problems. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/layout/matrix.h" #include "cutlass/platform/platform.h" #include "cutlass/gemm/gemm_enumerated_types.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/gemm/threadblock/index_remat.h" #if !defined(__CUDACC_RTC__) #include <iostream> #include "cutlass/core_io.h" #include "cutlass/trace.h" #endif ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Threadblock mapping control for GEMMs struct ThreadblockSwizzleStreamK { /// Advertise StreamkFeature using StreamkFeature = void; /// Kernel traits template <typename GemmKernel> struct KernelTraits {}; /// Reduction strategy enum ReductionStrategy { kNone, // Data-parallel strategy (no seams, fixup, etc.) 
kAtomic, // Non-deterministic reduction of SK-block partials using atomic aggregation in L2 kMixed, // Deterministic reduction of SK-block partials employing either: // (a) A separate wave of reduction thread blocks" (for scenarios with lots of // SK-blocks per SK-tile) // (b) Turnstile-ordered atomic aggregation in L2 (for scenarios with few // SK-blocks per SK-tile) }; static ReductionStrategy const kReductionStrategy = kMixed; // // Heuristics // /// Data-parallel wave-quantization efficiency threshold (above which we go data-parallel) static float constexpr kDpEfficiencyThreshold = 0.92f; /// Minimum number of MAC-iterations per streamk block static int const kMinItersPerSkBlock = 2; /// Height in CTAs of a grid rasterization cohort static int const kCohortCtasM = 8; /// Width in CTAs of a grid rasterization cohort static int const kCohortCtasN = 4; /// Number of CTAs per cohort static int const kCtasPerCohort = kCohortCtasN * kCohortCtasM; /// Cost-equivalent number of SM-iterations for fixup I/O static int const kFixupStartupIterEquiv = 10; static int const kFixupPeerIterEquiv = 3; // // Member state // /// The 3D value-extents of the GEMM computation volume (m,n,k) GemmCoord problem_size; /// Div/mod accelerators FastDivmod div_mod_tiled_shape_m; FastDivmod div_mod_tiled_shape_n; FastDivmod div_mod_tiled_cohort_shape_n; FastDivmod div_mod_iters_per_tile; /// Whether to perform cohort CTA rasterization bool cohort_raster; // Whether to pad and remap block indices bool remap_block_indices; /// CTA occupancy per SM int sm_occupancy; /// Number of SMs for dispatch heuristics to load-balance using Stream-K CTAs (wave size) int avail_sms; int dp_blocks; /// Number of data-parallel thread blocks in the grid int dp_first_wave_tiles; /// Number of output tiles each CTA in the first DP wave will produce /// Number of reduction blocks in the grid int reduction_blocks; int sk_waves; int sk_tiles; int sk_big_blocks_per_region; int sk_iters_per_region; /// Div/mod accelerators FastDivmod div_mod_sk_iters_per_normal_block; FastDivmod div_mod_sk_iters_per_big_block; FastDivmod div_mod_sk_iters_per_region; FastDivmod div_mod_sk_regions; //!! used in block map FastDivmod div_mod_sk_blocks_per_region; //!! 
used in block map /// The batch count int batch_count; // // Host+device interface // /// Constructor ThreadblockSwizzleStreamK() = default; /// Returns the GEMM volume in thread block tiles CUTLASS_HOST_DEVICE GemmCoord tiled_shape() const { return GemmCoord( static_cast<int>(div_mod_tiled_shape_m), static_cast<int>(div_mod_tiled_shape_n), batch_count); } /// Number of iterations per output tile CUTLASS_HOST_DEVICE int iters_per_tile() const { return static_cast<int>(div_mod_iters_per_tile); } /// Number of iterations for normal SK-blocks CUTLASS_HOST_DEVICE int sk_iters_per_normal_block() const { return static_cast<int>(div_mod_sk_iters_per_normal_block); } /// Number of SK regions CUTLASS_HOST_DEVICE int sk_regions() const { return static_cast<int>(div_mod_sk_regions); } /// Number of SK blocks per region (splitting factor) CUTLASS_HOST_DEVICE int sk_blocks_per_region() const { return static_cast<int>(div_mod_sk_blocks_per_region); } // // Host-side interface // /// Debug print void Print() { #ifndef __CUDA_ARCH__ auto tiles = tiled_shape().mn().product(); std::cout << "problem_size: (" << problem_size.m() << "," << problem_size.n() << ")" << ", tiled_shape: (" << tiled_shape().m() << "," << tiled_shape().n() << ")" << ", tiles: " << tiles << ", dp_tiles: " << tiles - sk_tiles << ", sk_tiles: " << sk_tiles << ", iters_per_tile: " << iters_per_tile() << ", reduction_blocks: " << reduction_blocks << ", dp_blocks: " << dp_blocks << ", dp_waves: " << dp_blocks / avail_sms << ", dp_first_wave_tiles: " << dp_first_wave_tiles << ", sk_blocks_per_region: " << sk_blocks_per_region() << ", sk_regions: " << sk_regions() << ", sk_waves: " << sk_waves << ", sk_iters_per_normal_block: " << sk_iters_per_normal_block() << ", sk_big_blocks_per_region: " << sk_big_blocks_per_region << ", remap_block_indices: " << remap_block_indices << ", cohort_raster: " << cohort_raster << ", sm_occupancy: " << sm_occupancy << ", avail_sms: " << avail_sms << ", num_blocks: " << get_num_blocks() << "\n\n"; #endif } // Compute sk_blocks to dispatch for a given number of sk_tiles static void get_sk_blocks( int &sk_blocks, /// [out] int &savings_iters, /// [out] int sk_tiles, int iters_per_tile, int avail_sms, int max_sk_occupancy, bool allow_partial_wave) { savings_iters = INT_MIN; sk_blocks = 0; if (sk_tiles == 0) { return; } int sk_iters = sk_tiles * iters_per_tile; int dp_equiv_waves = (sk_tiles + avail_sms - 1) / avail_sms; int dp_equiv_iters = iters_per_tile * dp_equiv_waves; int min_sk_blocks = (allow_partial_wave) ? 
fast_min(avail_sms, sk_tiles + 1) : avail_sms; int max_sk_blocks = fast_min(avail_sms * max_sk_occupancy, sk_iters / kMinItersPerSkBlock); for (int trial_sk_blocks = min_sk_blocks; trial_sk_blocks <= max_sk_blocks; ++trial_sk_blocks) { int sk_waves = (trial_sk_blocks + avail_sms - 1) / avail_sms; int max_sk_iters_per_block = (sk_iters + trial_sk_blocks - 1) / trial_sk_blocks; int sk_iter_equiv = max_sk_iters_per_block * sk_waves; int num_peers = ((trial_sk_blocks + sk_tiles - 1) / sk_tiles) + 1; // add one for alignment skew float iter_cost = 0.02f * float(num_peers) * float(sk_iter_equiv); if (trial_sk_blocks % sk_tiles == 0) { // aligned num_peers = (trial_sk_blocks / sk_tiles); iter_cost = 0.0f; } float peer_cost = 2.0f * float(num_peers); float base_cost = 2.0f * float(sk_waves); int fixup_iter_equiv = int(base_cost + iter_cost + peer_cost); int trial_savings_iters = dp_equiv_iters - sk_iter_equiv - fixup_iter_equiv; if (trial_savings_iters >= savings_iters) { savings_iters = trial_savings_iters; sk_blocks = trial_sk_blocks; } } } /// Determine the populations of DP and SK blocks to invoke for the given number of output tiles static void get_blocks( int &dp_tiles, /// [out] int &sk_blocks, /// [out] int output_tiles, int iters_per_tile, int avail_sms, int sm_occupancy) { int full_waves = output_tiles / avail_sms; int full_wave_tiles = full_waves * avail_sms; int partial_wave_tiles = output_tiles - full_wave_tiles; int score = -1; dp_tiles = output_tiles; sk_blocks = 0; if (partial_wave_tiles == 0) { // Perfect quantization return; } if (full_waves < sm_occupancy) { // We're less than full GPU occupancy // Form the SK wave from the partial wave to get us up to full GPU occupancy int max_sk_occupancy = sm_occupancy - full_waves; dp_tiles = full_wave_tiles; get_sk_blocks( sk_blocks, score, partial_wave_tiles, iters_per_tile, avail_sms, max_sk_occupancy, true); // we can run with less than a full wave of SK-blocks if (score < 0) { // not profitable sk_blocks = 0; dp_tiles = output_tiles; } return; } // We're at (or greater) than GPU occupancy if ((sm_occupancy > 1 ) && (full_waves % sm_occupancy == sm_occupancy - 1)) { // If occupancy is more than one CTA per SM, form the SK wave from the partial // wave to get us to full GPU occupancy int max_sk_occupancy = 1; dp_tiles = full_wave_tiles; get_sk_blocks( sk_blocks, score, partial_wave_tiles, iters_per_tile, avail_sms, max_sk_occupancy, true); // we can run with less than a full wave of SK-blocks if (score >= 0) { return; } } // Form the SK wave by combining the last full wave and the partial wave // We're less than full GPU occupancy dp_tiles = full_wave_tiles - avail_sms; int max_sk_occupancy = sm_occupancy - ((full_waves - 1) % sm_occupancy); get_sk_blocks( sk_blocks, score, partial_wave_tiles + avail_sms, iters_per_tile, avail_sms, max_sk_occupancy, false); // we cannot run with less than a full wave of SK-blocks if (score < 0) { // not profitable sk_blocks = 0; dp_tiles = output_tiles; } } /// Constructor: *Gemm* problem size (m, n, k) ThreadblockSwizzleStreamK( GemmUniversalMode const mode_, GemmCoord const problem_size_, GemmCoord const tile_size_, int const batch_split_, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) int const sm_occupancy_, int const device_sms_, int const avail_sms_, /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 
implies classic data-parallel scheduling) size_t const element_A_bytes_, size_t const element_B_bytes_, size_t const element_C_bytes_, int const epilogue_acc_fragments_) : problem_size(problem_size_), batch_count((mode_ == GemmUniversalMode::kBatched || mode_ == GemmUniversalMode::kArray) ? batch_split_ : 1), reduction_blocks(0), dp_blocks(0), dp_first_wave_tiles(1), // Default: one tile per DP-block in the first wave of DP blocks sk_tiles(0), sk_big_blocks_per_region(0), sk_iters_per_region(0), sk_waves(0), sm_occupancy(sm_occupancy_), remap_block_indices(false), avail_sms(fast_max(1, avail_sms_)), cohort_raster(false) { int gpu_occupancy = device_sms_ * sm_occupancy; int iters_per_tile = (problem_size.k() + tile_size_.k() - 1) / tile_size_.k(); int sk_iters_per_normal_block = 0; int sk_regions = 1; // Default: a single region of iteration space (across all SK tiles) int sk_blocks_per_region = 0; GemmCoord tiled_shape( (problem_size.m() + tile_size_.m() - 1) / tile_size_.m(), (problem_size.n() + tile_size_.n() - 1) / tile_size_.n(), batch_count); size_t problem_bytes = (element_C_bytes_ * problem_size.m() * problem_size.n()) + (element_A_bytes_ * problem_size.m() * problem_size.k()) + (element_B_bytes_ * problem_size.k() * problem_size.n()); size_t problem_flops = size_t(problem_size.m()) * size_t(problem_size.n()) * size_t(problem_size.k()) * 2; [[maybe_unused]] float flops_per_byte = float(problem_flops) / float(problem_bytes); int output_tiles = tiled_shape.m() * tiled_shape.n(); int waves = (output_tiles + avail_sms - 1) / avail_sms; [[maybe_unused]] float dp_efficiency = float(output_tiles) / float(waves * avail_sms); // // Determine dispatch composition of DP-tiles and SK-blocks // // Start with a DP-only configuration int dp_tiles = output_tiles; // Number of data-parallel tiles int sk_blocks = 0; // Number of thread blocks to produce the remaining SK tiles // Only kGemm mode allows for SK load balancing if (mode_ == GemmUniversalMode::kGemm) { int split_factor = batch_split_; if (split_factor > 1) { // Split-K override dp_tiles = 0; sk_blocks = output_tiles * split_factor; } else if ((kReductionStrategy != kNone) && // Load-balancing strategy statically enabled (avail_sms > 1)) // Plurality of SMs to load balance across { // Use heuristics get_blocks( dp_tiles, /// [out] sk_blocks, /// [out] output_tiles, iters_per_tile, avail_sms, sm_occupancy); } } sk_tiles = output_tiles - dp_tiles; // Compute SK block iteration details if (sk_blocks > 0) { sk_waves = (sk_blocks + avail_sms - 1) / avail_sms; int sk_iters = sk_tiles * iters_per_tile; sk_blocks = fast_min(sk_blocks, sk_iters); sk_iters_per_normal_block = sk_iters / sk_blocks; int extra_sk_iters = sk_iters - (sk_iters_per_normal_block * sk_blocks); int sk_big_blocks = extra_sk_iters; if ((sk_blocks > sk_tiles) && (sk_blocks % sk_tiles == 0)) { // Split-K decomposition sk_regions = sk_tiles; } sk_blocks_per_region = sk_blocks / sk_regions; sk_big_blocks_per_region = sk_big_blocks / sk_regions; sk_iters_per_region = sk_iters / sk_regions; // Use a separate reduction wave when all of: // - Non-atomic reduction stratgy // - The number of SK waves won't fully occupy the GPU (Otherwise we don't have // a strong-scaling case for more parallel reduction) // - More than three peers working on an SK tile. (This occurs when the ratio of // SK-blocks to SK-tiles > 2, as a single tile may be covered by four SK-blocks, // e.g.:[partial-block | block | block | partial-block] ). 
With three or // fewer peers, the two non-finishing SK-blocks are not expected to contend. if ((kReductionStrategy == kMixed) && (sk_waves < sm_occupancy) && (sk_blocks > 2 * sk_tiles)) { // Launch a reduction block for every accumulator fragment in each SK-tile reduction_blocks = sk_tiles * epilogue_acc_fragments_; } // When we have a multi-occupancy kernel and at least two waves of active blocks (where // at least one wave is SK blocks), we need to (1) dispatch at least four waves, and (2) // remap the block indices so that we can reliably spread the SK blocks evenly across the // device's first SM occupancy valence. Also see get_num_blocks() and get_block_idx(). remap_block_indices = ( (sm_occupancy > 1) && (device_sms_ == avail_sms) && (get_num_active_blocks() > avail_sms * 2)); // Initialize fast div/mod members related to SK div_mod_sk_iters_per_normal_block = FastDivmod(sk_iters_per_normal_block); div_mod_sk_iters_per_big_block = FastDivmod(sk_iters_per_normal_block + 1); div_mod_sk_iters_per_region = FastDivmod(sk_iters_per_region); div_mod_sk_regions = FastDivmod(sk_regions); div_mod_sk_blocks_per_region = FastDivmod(sk_blocks_per_region); } // // Compute DP blocks // dp_blocks = dp_tiles; cutlass::gemm::GemmCoord tiled_cohort_shape( (tiled_shape.m() + kCohortCtasM - 1) / kCohortCtasM, (tiled_shape.n() + kCohortCtasN - 1) / kCohortCtasN, tiled_shape.k()); int cohort_blocks = (tiled_cohort_shape.m() * tiled_cohort_shape.n()) * kCtasPerCohort; float cohort_efficiency = float(dp_blocks) / float(cohort_blocks); // Check if the SK tiles would be in cohorts that are in-bounds bool sk_in_range = true; if (sk_tiles > 0) { int last_sk_tile = sk_tiles - 1; int cohort_tile_idx = last_sk_tile / kCtasPerCohort; int cohort_grid_m = cohort_tile_idx / tiled_cohort_shape.n(); int cohort_grid_n = (cohort_grid_m > 0) ?
tiled_cohort_shape.n() - 1 : cohort_tile_idx % tiled_cohort_shape.n(); if ((((cohort_grid_m + 1) * kCohortCtasM) >= tiled_shape.m()) || (((cohort_grid_n + 1) * kCohortCtasN) >= tiled_shape.n())) { sk_in_range = false; } } // Decide if we're going to be doing cohort raster if (sk_in_range && (dp_blocks >= gpu_occupancy * 2) && (cohort_efficiency > 0.85f)) { cohort_raster = true; dp_blocks = cohort_blocks; } else if (sk_waves > 0) { // Update semi-persistence of first DP wave to ensure full grid wavesets // (Only applies when there's an SK component and we're not doing blocked cohort rasterization) int dp_tile_waves = (dp_tiles + avail_sms - 1) / avail_sms; int full_dp_tile_waves = dp_tiles / avail_sms; int waveset_excess = (sk_waves + dp_tile_waves) % sm_occupancy; if (dp_first_wave_tiles + waveset_excess <= full_dp_tile_waves) { dp_first_wave_tiles += waveset_excess; dp_blocks -= (waveset_excess * avail_sms); } } // Setup fast-div/mod for device-side usage div_mod_tiled_shape_m = FastDivmod(tiled_shape.m()); div_mod_tiled_shape_n = FastDivmod(tiled_shape.n()); div_mod_tiled_cohort_shape_n = FastDivmod(tiled_cohort_shape.n()); div_mod_iters_per_tile = FastDivmod(iters_per_tile); } /// Number of blocks performing useful work int get_num_active_blocks() const { return (sk_waves * avail_sms) + dp_blocks + reduction_blocks; } /// Obtains number of threadblocks per GEMM int get_num_blocks() const { int active_blocks = get_num_active_blocks(); if (remap_block_indices) { // Add padding blocks if we are performing remapping in order to dispatch a grid of at least four waves return fast_max(active_blocks, avail_sms * 4); } return active_blocks; } /// Obtains grid extents in CTAs dim3 get_grid_dims() const { return dim3(get_num_blocks(), 1, batch_count); } // // Device-side interface // /// Obtains number of threadblocks per GEMM CUTLASS_DEVICE int device_num_blocks() const { return gridDim.x; } /// Obtains tile index for the given sk iteration CUTLASS_DEVICE int get_sk_tile_idx(int iter) const { int tile_idx = div_mod_iters_per_tile.div(iter); return tile_idx; } /// Obtains the batch index CUTLASS_DEVICE int get_batch_idx() const { return RematerializeBlockIdxZ(); } /// Obtains the calling threadblock's tiled coordinates for the given tile index CUTLASS_DEVICE GemmCoord get_tile_offset(int tile_idx) const { int m, n; // row-major raster div_mod_tiled_shape_n(m, n, tile_idx); if (tiled_shape().m() < tiled_shape().n()) { // column-major raster div_mod_tiled_shape_m(n, m, tile_idx); } if (cohort_raster) { // tiled cohort raster int cohort_tile_idx = tile_idx / kCtasPerCohort; int cohort_grid_m, cohort_grid_n; div_mod_tiled_cohort_shape_n(cohort_grid_m, cohort_grid_n, cohort_tile_idx); int block_idx_cohort = tile_idx % kCtasPerCohort; int block_cohort_m = block_idx_cohort / kCohortCtasN; int block_cohort_n = block_idx_cohort % kCohortCtasN; m = (cohort_grid_m * kCohortCtasM) + block_cohort_m; n = (cohort_grid_n * kCohortCtasN) + block_cohort_n; } return GemmCoord(m, n, get_batch_idx()); } /// Obtains the calling threadblock's tiled coordinates for the given tile index (row-major rasterization) CUTLASS_DEVICE GemmCoord get_tile_offset_row_major(int tile_idx) const { // row-major raster int m, n; div_mod_tiled_shape_n(m, n, tile_idx); return GemmCoord(m, n, get_batch_idx()); } /// Obtains calling threadblock's linear threadblock index CUTLASS_DEVICE int get_block_idx() const { int block_idx = RematerializeBlockIdxX(); // Remap the block indices for the first two waves of thread blocks if // we have 
multi-occupancy and the grid constitutes four or more waves if (remap_block_indices && (block_idx < avail_sms * 2)) { int dest_sm = block_idx / 2; int dest_wave = block_idx % 2; int remapped_block_idx = dest_sm + (dest_wave * avail_sms); block_idx = remapped_block_idx; } // Remap block indices to interleave SK regions to limit intra-region waiting if (block_idx < sk_regions() * sk_blocks_per_region()) { int block_in_region; int region; div_mod_sk_regions(block_in_region, region, block_idx); block_idx = (region * sk_blocks_per_region()) + block_in_region; } return block_idx; } /// Obtains the linear threadblock index of the SK block that owns the given global iteration CUTLASS_DEVICE int get_sk_block_idx(int iter) const { int region_idx; int iter_in_region; div_mod_sk_iters_per_region(region_idx, iter_in_region, iter); int big_block_iters = (sk_big_blocks_per_region * sk_iters_per_normal_block()) + sk_big_blocks_per_region; // number of iterations in the region's big blocks int normal_block_iters = iter_in_region - big_block_iters; // number of iterations in the region's normal blocks int big_block_idx_in_region = div_mod_sk_iters_per_big_block.div(iter_in_region); int normal_block_idx_in_region = sk_big_blocks_per_region + div_mod_sk_iters_per_normal_block.div(normal_block_iters); int block_idx_in_region = (big_block_idx_in_region < sk_big_blocks_per_region) ? big_block_idx_in_region : normal_block_idx_in_region; int owning_block_idx = (sk_blocks_per_region() * region_idx) + block_idx_in_region; return owning_block_idx; } /// Obtains iteration extents for the given SK block index CUTLASS_DEVICE void get_iter_extents( int sk_block_idx, int &block_iter_begin, int &block_iter_end) const { int region_idx; int block_idx_in_region; div_mod_sk_blocks_per_region(region_idx, block_idx_in_region, sk_block_idx); block_iter_begin = (region_idx * sk_iters_per_region) + (block_idx_in_region * sk_iters_per_normal_block()); // Adjust extents for the first "num_big_blocks" blocks that get one extra iteration int block_iters = sk_iters_per_normal_block(); if (block_idx_in_region < sk_big_blocks_per_region) { // This is a +1 iteration block block_iter_begin += block_idx_in_region; block_iters++; } else { // This is a regular block block_iter_begin += sk_big_blocks_per_region; } block_iter_end = block_iter_begin + block_iters; } /// Obtains the linear threadblock index of the first block to work on the given tile CUTLASS_DEVICE int get_first_block_idx(int tile_idx, int block_idx) const { if (tile_idx >= sk_tiles) { // DP tile return block_idx; } int iter = tile_idx * iters_per_tile(); return get_sk_block_idx(iter); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h", "repo_id": "cutlass", "token_count": 10734 }
41
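The dispatch heuristic in the StreamK swizzle above is driven by wave quantization: output tiles are grouped into full waves of avail_sms CTAs, and only the leftover partial wave is worth covering with stream-K blocks. The following standalone host-side sketch is an illustration only (it is not CUTLASS code, and all of the sizes are hypothetical); it replays that arithmetic so the efficiency argument can be checked in isolation.

// Illustrative wave-quantization arithmetic behind the DP/SK split (hypothetical sizes).
#include <cstdio>

int main() {
  int output_tiles = 130;   // e.g. a 13 x 10 grid of output tiles
  int avail_sms    = 32;    // SMs available for load balancing

  int full_waves         = output_tiles / avail_sms;        // 4 full waves
  int full_wave_tiles    = full_waves * avail_sms;          // 128 tiles
  int partial_wave_tiles = output_tiles - full_wave_tiles;  // 2 leftover tiles

  // A purely data-parallel dispatch needs a fifth wave in which only 2 of the 32 SMs
  // do useful work; stream-K instead spreads those tiles' iterations over SK blocks
  // so the final wave stays busy.
  int dp_waves = (output_tiles + avail_sms - 1) / avail_sms;
  float dp_efficiency = float(output_tiles) / float(dp_waves * avail_sms);

  std::printf("full_waves=%d leftover_tiles=%d dp_efficiency=%.2f\n",
              full_waves, partial_wave_tiles, dp_efficiency);
  return 0;
}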
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing warp-level matrix multiply-accumulate operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/thread/mma.h" #include "cutlass/gemm/warp/mma_simt_tile_iterator.h" #include "cutlass/gemm/warp/mma_simt_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension int PartitionsK = 1, /// Complex transformation on operand A ComplexTransform TransformA = ComplexTransform::kNone, /// Complex transformation on operand B ComplexTransform TransformB = ComplexTransform::kNone, /// Used for partial specialization typename Enable = bool > class MmaSimt { public: /// Shape of warp-level matrix operation (concept: GemmShape) using Shape = Shape_; /// Data type of multiplicand A using ElementA = ElementA_; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of multiplicand B using ElementB = ElementB_; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of accumulator matrix C using ElementC = ElementC_; /// Layout of accumulator matrix C using LayoutC = LayoutC_; /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) using Policy = Policy_; /// Indicates class of matrix operator using OperatorClass = arch::OpClassSimt; /// Hard-coded for now using ArchTag = arch::Sm50; /// Complex transform on A operand static ComplexTransform const kTransformA = TransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = TransformB; /// Layout of threads using ThreadLayoutA = typename platform::conditional< platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA >::value, layout::ColumnMajor, typename platform::conditional < platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value, layout::RowMajor, LayoutA>::type >::type; using ThreadLayoutB = typename platform::conditional< platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutB >::value, layout::ColumnMajor, typename platform::conditional < platform::is_same< layout::RowMajorInterleaved<4>, LayoutB >::value, layout::RowMajor, LayoutB>::type >::type; static constexpr bool use_dp4a = (platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA>::value || platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value) && platform::is_same< ElementA, int8_t >::value && platform::is_same< ElementB, int8_t >::value; using dp4a_type = typename platform::conditional< use_dp4a , int8_t, bool >::type; /// Thread-level matrix multiply accumulate operator using ThreadMma = thread::Mma< GemmShape< Shape::kM / Policy::WarpShape::kRow, Shape::kN / Policy::WarpShape::kColumn, Policy::LaneMmaShape::kK>, ElementA, ThreadLayoutA, ElementB, ThreadLayoutB, ElementC, LayoutC, arch::OpMultiplyAdd, dp4a_type >; /// Underlying matrix multiply operator (concept: arch::Mma) using ArchMmaOperator = typename ThreadMma::ArchMmaOperator; /// Indicates math operator using MathOperator = typename ArchMmaOperator::Operator; /// Shape of the underlying instruction using InstructionShape = GemmShape<1,1,use_dp4a ? 
4 : 1>; public: /// Iterates over the A operand in memory using IteratorA = MmaSimtTileIterator< MatrixShape<Shape::kM, Policy::LaneMmaShape::kK>, Operand::kA, ElementA, LayoutA, Policy, PartitionsK, Shape::kK >; /// Storage for A tile using FragmentA = typename IteratorA::Fragment; /// Storage for transformed A tile using TransformedFragmentA = FragmentA; /// Iterates over the B operand in memory using IteratorB = MmaSimtTileIterator< MatrixShape<Policy::LaneMmaShape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB, Policy, PartitionsK, Shape::kK >; /// Storage for B tile using FragmentB = typename IteratorB::Fragment; /// Storage for transformed B tile using TransformedFragmentB = FragmentB; /// Iterates over the C operand in memory using IteratorC = MmaSimtTileIterator< MatrixShape<Shape::kM, Shape::kN>, Operand::kC, ElementC, LayoutC, Policy >; /// Storage for C tile using FragmentC = typename ThreadMma::FragmentC; public: // // Methods // /// Ctor CUTLASS_DEVICE MmaSimt() {} /// Performs a warp-level matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &d, FragmentA a, FragmentB b, FragmentC const &c, int group_idx = 0) const { ThreadMma mma; if (kTransformA == ComplexTransform::kConjugate) { conjugate<FragmentA> conj_a; a = conj_a(a); } if (kTransformB == ComplexTransform::kConjugate) { conjugate<FragmentB> conj_b; b = conj_b(b); } mma(d, a, b, c); } /// Transform the mma operands to the required types CUTLASS_DEVICE void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, FragmentA const &A, FragmentB const &B) const { dst_A = A; dst_B = B; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/warp/mma_simt.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_simt.h", "repo_id": "cutlass", "token_count": 2843 }
42
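MmaSimt above hands each lane a thread::Mma whose shape is the warp tile divided by the lane arrangement in Policy::WarpShape. The short host-side sketch below is only an illustration of that division (the 64x64 warp tile and 4x8 lane grid are assumed values, not taken from any particular kernel); it shows how much accumulator state each of the 32 lanes owns and how many scalar FMAs it issues per k-step.

// Per-lane tile arithmetic for the SIMT warp-level MMA (hypothetical shapes).
#include <cstdio>

int main() {
  int warp_m = 64, warp_n = 64;      // warp-level tile: Shape::kM x Shape::kN
  int lanes_row = 4, lanes_col = 8;  // lane arrangement: 4 x 8 = 32 threads
  int lane_k = 1;                    // Policy::LaneMmaShape::kK

  int thread_m = warp_m / lanes_row; // 16 accumulator rows per lane
  int thread_n = warp_n / lanes_col; // 8 accumulator columns per lane

  std::printf("per-lane accumulator: %d x %d (%d elements), FMAs per k-step: %d\n",
              thread_m, thread_n, thread_m * thread_n, thread_m * thread_n * lane_k);
  return 0;
}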
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing warp-level matrix multiply-accumulate operations targeting Tensor Cores. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/arch/mma_sm75.h" #include "cutlass/arch/mma_sm80.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/warp/mma_tensor_op_policy.h" #include "cutlass/gemm/warp/mma_tensor_op.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) typename Policy_, /// Reduce operand A or B along K dimension bool ReduceKForA_, /// Number of partitions along K dimension int PartitionsK_ = 1, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor = false, /// Used for partial specialization typename Enable = bool > class MmaWithReductionTensorOp { public: /// Shape of warp-level matrix operation (concept: GemmShape) using Shape = Shape_; /// Data type of multiplicand A using ElementA = ElementA_; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of multiplicand B using ElementB = ElementB_; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of accumulator matrix C using ElementC = ElementC_; /// Layout of accumulator matrix C using LayoutC = LayoutC_; /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) using Policy = Policy_; /// Underlying matrix multiply operator (concept: arch::Mma) using ArchMmaOperator = typename Policy::Operator; /// Indicates math operator using MathOperator = typename ArchMmaOperator::Operator; /// Architecture tag from underlying instruction using ArchTag = typename ArchMmaOperator::ArchTag; /// Indicates class of matrix operator using OperatorClass = arch::OpClassTensorOp; /// Shape of underlying instruction using InstructionShape = typename ArchMmaOperator::Shape; /// Complex transform on A operand static ComplexTransform const kTransformA = ComplexTransform::kNone; /// Complex transform on B operand static ComplexTransform const kTransformB = ComplexTransform::kNone; /// Number of threads participating in warp-level matrix product static int const kThreadCount = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; static bool const kReduceKForA = ReduceKForA_; static_assert(platform::is_same<ElementA, cutlass::half_t>::value || platform::is_same<ElementA, cutlass::bfloat16_t>::value, "ElementA needs to be fp16 or bf16."); static_assert(platform::is_same<ElementB, cutlass::half_t>::value || platform::is_same<ElementB, cutlass::bfloat16_t>::value, "ElementB needs to be fp16 or bf16."); static_assert(platform::is_same<InstructionShape, cutlass::gemm::GemmShape<16, 8, 16>>::value, "Only supports 16x8x16 tensor core instruction."); static_assert(!AccumulatorsInRowMajor, "Only calls tensor core instructions in column major."); public: /// Iterates over the A operand in memory using IteratorA = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA, MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>, Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; /// Storage for A tile using FragmentA = typename IteratorA::Fragment; /// Storage for transformed A tile using TransformedFragmentA = Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>; /// Iterates over the B operand in memory using IteratorB = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kK, Shape::kN>, 
Operand::kB, ElementB, LayoutB, MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>, Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; /// Storage for B tile using FragmentB = typename IteratorB::Fragment; /// Storage for transformed B tile using TransformedFragmentB = Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>; /// Iterates over the C operand in memory using IteratorC = MmaTensorOpAccumulatorTileIterator< MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC, typename ArchMmaOperator::Shape, typename Policy::OpDelta>; /// Storage for C tile using FragmentC = typename IteratorC::Fragment; /// Number of mma operations performed using MmaIterations = MatrixShape< (Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM, (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN >; using FragmentReduction = Array<ElementC, kReduceKForA ? (Shape::kM / 8) : (Shape::kN / 8)>; public: /// Underlying matrix multiply operator (concept: arch::Mma) ArchMmaOperator mma; public: // // Methods // /// Ctor CUTLASS_DEVICE MmaWithReductionTensorOp() {} /// Performs a warp-level matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &D, TransformedFragmentA const &A, TransformedFragmentB const &B, FragmentC const &C, FragmentReduction &gemm_k_reduction ) const { using MmaOperandA = typename ArchMmaOperator::FragmentA; using MmaOperandB = typename ArchMmaOperator::FragmentB; using MmaOperandC = typename ArchMmaOperator::FragmentC; D = C; [[maybe_unused]] MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A); [[maybe_unused]] MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B); [[maybe_unused]] MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D); #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800) assert(0); #elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) // Serpentine visitation order maximizing reuse of Ra CUTLASS_PRAGMA_UNROLL for (int m = 0; m < MmaIterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < MmaIterations::kColumn; ++n) { int n_serpentine = ((m % 2) ? 
(MmaIterations::kColumn - 1 - n) : n); mma(ptr_D[m + n_serpentine * MmaIterations::kRow], ptr_A[m], ptr_B[n_serpentine], ptr_D[m + n_serpentine * MmaIterations::kRow]); if (!kReduceKForA && m == 0) { #if 0 gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4]); gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 1]); gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 2]); gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 3]); #else uint32_t const *tmp = reinterpret_cast<uint32_t const *>(&B); if (platform::is_same<ElementB, cutlass::half_t>::value) { asm volatile( "{\n\t" " .reg .f16 low, high;\n\t" " .reg .f32 tmp;\n\t" " mov.b32 {low, high}, %1;\n\t" " cvt.f32.f16 tmp, low;\n\t" " add.f32 %0, tmp, %0;\n\t" " cvt.f32.f16 tmp, high;\n\t" " add.f32 %0, tmp, %0;\n\t" " mov.b32 {low, high}, %2;\n\t" " cvt.f32.f16 tmp, low;\n\t" " add.f32 %0, tmp, %0;\n\t" " cvt.f32.f16 tmp, high;\n\t" " add.f32 %0, tmp, %0;\n\t" "}\n\t" : "+f"(gemm_k_reduction[n_serpentine]) : "r"(tmp[n_serpentine * 2]), "r"(tmp[n_serpentine * 2 + 1])); } else if (platform::is_same<ElementB, cutlass::bfloat16_t>::value) { asm volatile( "{\n\t" " .reg .f32 tmp;\n\t" " shl.b32 tmp, %1, 16;\n\t" " add.f32 %0, tmp, %0;\n\t" " and.b32 tmp, %1, 0xffff0000;\n\t" " add.f32 %0, tmp, %0;\n\t" " shl.b32 tmp, %2, 16;\n\t" " add.f32 %0, tmp, %0;\n\t" " and.b32 tmp, %2, 0xffff0000;\n\t" " add.f32 %0, tmp, %0;\n\t" "}\n\t" : "+f"(gemm_k_reduction[n_serpentine]) : "r"(tmp[n_serpentine * 2]), "r"(tmp[n_serpentine * 2 + 1])); } else { assert(0); } #endif } if (kReduceKForA && (n == 0)) { #if 0 gemm_k_reduction[m * 2] += float(A[m * 8]); gemm_k_reduction[m * 2] += float(A[m * 8 + 1]); gemm_k_reduction[m * 2] += float(A[m * 8 + 4]); gemm_k_reduction[m * 2] += float(A[m * 8 + 5]); gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 2]); gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 3]); gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 6]); gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 7]); #else uint32_t const *tmp = reinterpret_cast<uint32_t const *>(&A); if (platform::is_same<ElementA, cutlass::half_t>::value) { asm volatile( "{\n\t" " .reg .f16 low, high;\n\t" " .reg .f32 tmp;\n\t" " mov.b32 {low, high}, %2;\n\t" " cvt.f32.f16 tmp, low;\n\t" " add.f32 %0, tmp, %0;\n\t" " cvt.f32.f16 tmp, high;\n\t" " add.f32 %0, tmp, %0;\n\t" " mov.b32 {low, high}, %3;\n\t" " cvt.f32.f16 tmp, low;\n\t" " add.f32 %1, tmp, %1;\n\t" " cvt.f32.f16 tmp, high;\n\t" " add.f32 %1, tmp, %1;\n\t" " mov.b32 {low, high}, %4;\n\t" " cvt.f32.f16 tmp, low;\n\t" " add.f32 %0, tmp, %0;\n\t" " cvt.f32.f16 tmp, high;\n\t" " add.f32 %0, tmp, %0;\n\t" " mov.b32 {low, high}, %5;\n\t" " cvt.f32.f16 tmp, low;\n\t" " add.f32 %1, tmp, %1;\n\t" " cvt.f32.f16 tmp, high;\n\t" " add.f32 %1, tmp, %1;\n\t" "}\n\t" : "+f"(gemm_k_reduction[m * 2]), "+f"(gemm_k_reduction[m * 2 + 1]) : "r"(tmp[m * 4]), "r"(tmp[m * 4 + 1]),"r"(tmp[m * 4 + 2]), "r"(tmp[m * 4 + 3])); } else if (platform::is_same<ElementA, cutlass::bfloat16_t>::value) { asm volatile( "{\n\t" " .reg .f32 tmp;\n\t" " shl.b32 tmp, %2, 16;\n\t" " add.f32 %0, tmp, %0;\n\t" " and.b32 tmp, %2, 0xffff0000;\n\t" " add.f32 %0, tmp, %0;\n\t" " shl.b32 tmp, %3, 16;\n\t" " add.f32 %1, tmp, %1;\n\t" " and.b32 tmp, %3, 0xffff0000;\n\t" " add.f32 %1, tmp, %1;\n\t" " shl.b32 tmp, %4, 16;\n\t" " add.f32 %0, tmp, %0;\n\t" " and.b32 tmp, %4, 0xffff0000;\n\t" " add.f32 %0, tmp, %0;\n\t" " shl.b32 tmp, %5, 16;\n\t" " add.f32 %1, tmp, %1;\n\t" " and.b32 tmp, %5, 0xffff0000;\n\t" " add.f32 %1, tmp, %1;\n\t" "}\n\t" : 
"+f"(gemm_k_reduction[m * 2]), "+f"(gemm_k_reduction[m * 2 + 1]) : "r"(tmp[m * 4]), "r"(tmp[m * 4 + 1]),"r"(tmp[m * 4 + 2]), "r"(tmp[m * 4 + 3])); } else { assert(0); } #endif } } } #else assert(0); #endif } /// Transform the mma operands to the required types CUTLASS_DEVICE void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, FragmentA const &A, FragmentB const &B) const { // // Define conversions from source type to instruction type // FloatRoundStyle const kRoundA = PreferredRoundingMode<typename ArchMmaOperator::ElementA, ElementA>::kRound; FloatRoundStyle const kRoundB = PreferredRoundingMode<typename ArchMmaOperator::ElementB, ElementB>::kRound; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800) detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA, FragmentA::kElements, kRoundA> convert_A; NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB, FragmentB::kElements / 2, kRoundB> convert_B; Array<ElementB, FragmentB::kElements / 2> const *ptr_B = reinterpret_cast<Array<ElementB, FragmentB::kElements / 2> const *>(&B); Array<typename ArchMmaOperator::ElementB, FragmentB::kElements / 2> * ptr_dst_B = reinterpret_cast<Array<typename ArchMmaOperator::ElementB, FragmentB::kElements / 2> *>(&dst_B); dst_A = convert_A(A); ptr_dst_B[0] = convert_B(ptr_B[0]); ptr_dst_B[1] = convert_B(ptr_B[1]); #elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA, FragmentA::kElements / 2, kRoundA> convert_A; NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB, FragmentB::kElements, kRoundB> convert_B; Array<ElementA, FragmentA::kElements / 2> const *ptr_A = reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A); Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> * ptr_dst_A = reinterpret_cast<Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *>(&dst_A); dst_B = convert_B(B); ptr_dst_A[0] = convert_A(ptr_A[0]); ptr_dst_A[1] = convert_A(ptr_A[1]); #else assert(0); #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h", "repo_id": "cutlass", "token_count": 7946 }
43
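The bf16 branch of the inline PTX in MmaWithReductionTensorOp widens each packed bfloat16 value to float by moving its 16 bits into the upper half of a 32-bit word before the f32 add. The sketch below is a host-side restatement of that widening, not library code; the packed constants are made-up sample values chosen so the result is easy to verify by hand.

// Host-side illustration of the bf16 k-reduction widening (sample values only).
#include <cstdint>
#include <cstdio>
#include <cstring>

// bfloat16 is the upper 16 bits of an IEEE f32, so widening is a 16-bit left shift.
static float bf16_bits_to_float(uint16_t bits) {
  uint32_t widened = uint32_t(bits) << 16;
  float f;
  std::memcpy(&f, &widened, sizeof(f));
  return f;
}

int main() {
  // One 32-bit operand register holding two packed bf16 values: 1.5 (low half) and -2.0 (high half).
  uint32_t packed = uint32_t(0x3FC0) | (uint32_t(0xC000) << 16);

  float acc = 0.f;
  acc += bf16_bits_to_float(uint16_t(packed & 0xFFFFu)); // low half (the PTX shl path)
  acc += bf16_bits_to_float(uint16_t(packed >> 16));     // high half (the PTX and-mask path)

  std::printf("k-reduction contribution: %f\n", acc);    // prints -0.500000
  return 0;
}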
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/coord.h" #include "cutlass/layout/pitch_linear.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace layout { // template < // int ElementSize, // gemm::Operand Operand // > // struct VoltaTensorOpMultiplicandCongruous; // template < // int ElementSize, // gemm::Operand Operand // > // struct ColumnMajorVoltaTensorOpMultiplicandCongruous; // template < // int ElementSize, // gemm::Operand Operand // > // struct RowMajorVoltaTensorOpMultiplicandCongruous; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear memory. 
template <int ElementSize> struct VoltaTensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; /// Fundamental tile shape in units of vectors using TileShape = PitchLinearShape<8, 4>; /// Fundamental partition shape in units of vectors using PartitionShape = PitchLinearShape<8, 2>; // // Static constants // static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; using PartitionCount = PitchLinearShape< TileShape::kContiguous / PartitionShape::kContiguous, TileShape::kStrided / PartitionShape::kStrided >; using AccessCount = PitchLinearShape< PartitionShape::kContiguous, PartitionShape::kStrided >; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE VoltaTensorOpMultiplicandCongruous(Index ldm = 0): stride_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE VoltaTensorOpMultiplicandCongruous(Stride stride): stride_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static VoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return VoltaTensorOpMultiplicandCongruous(extent[0]); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { // First, compute c and s of vector within source (in units of vector accesses) int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; int vec_strided_idx = coord.strided(); // Compute the fundamental tile being accessed int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous; int tile_strided_idx = vec_strided_idx / TileShape::kStrided; int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous; int tile_strided_residual = vec_strided_idx % TileShape::kStrided; // Then swizzle in a tile // Swizzle pattern is (tid[2:0] << 2)|(tid[4:3] ^ tid[2:1]) int permuted_strided_within_tile = (tile_contiguous_residual >> 1); int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) | ((tile_contiguous_residual & 1) << 2); // Compute final element location int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous + permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess); int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile; return element_contiguous + element_strided * stride_[0]; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous 
template <int ElementSize> struct ColumnMajorVoltaTensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = VoltaTensorOpMultiplicandCongruous<ElementSize>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE ColumnMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return ColumnMajorVoltaTensorOpMultiplicandCongruous(extent.row()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; /// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous template <int ElementSize> struct RowMajorVoltaTensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = VoltaTensorOpMultiplicandCongruous<ElementSize>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base 
layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE RowMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return RowMajorVoltaTensorOpMultiplicandCongruous(extent.column()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; /// Template based on element size (in bits) - defined in terms of pitch-linear memory. // template <int ElementSize, Operand Operand> template <int ElementSize> struct VoltaTensorOpMultiplicandBCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; /// Fundamental tile shape in units of vectors using TileShape = PitchLinearShape<8, 4>; /// Fundamental partition shape in units of vectors using PartitionShape = PitchLinearShape<4, 4>; // // Static constants // static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; using PartitionCount = PitchLinearShape< TileShape::kContiguous / PartitionShape::kContiguous, TileShape::kStrided / PartitionShape::kStrided >; using AccessCount = PitchLinearShape< PartitionShape::kContiguous, PartitionShape::kStrided >; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE VoltaTensorOpMultiplicandBCongruous(Index ldm = 0): stride_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE VoltaTensorOpMultiplicandBCongruous(Stride stride): stride_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static VoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) { return VoltaTensorOpMultiplicandBCongruous(extent[0]); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { // First, compute c and s of vector within source (in units of vector accesses) int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; int vec_strided_idx = coord.strided(); // Compute the fundamental tile being accessed int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous; int tile_strided_idx = vec_strided_idx / TileShape::kStrided; int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous; int tile_strided_residual = vec_strided_idx % TileShape::kStrided; // Then swizzle in a tile // Swizzle pattern is (tid[1:0] << 3)|(tid & 0x4)|(tid[1:0]) int permuted_strided_within_tile = (tile_contiguous_residual & 0x3); int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) | (tile_contiguous_residual & 0x4); // Compute final element location int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous + permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess); int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile; return element_contiguous + element_strided * stride_[0]; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous template <int ElementSize> struct ColumnMajorVoltaTensorOpMultiplicandBCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = VoltaTensorOpMultiplicandBCongruous<ElementSize>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE ColumnMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) { return ColumnMajorVoltaTensorOpMultiplicandBCongruous(extent.row()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; /// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous template <int ElementSize> struct RowMajorVoltaTensorOpMultiplicandBCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = VoltaTensorOpMultiplicandBCongruous<ElementSize>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE RowMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) { return RowMajorVoltaTensorOpMultiplicandBCongruous(extent.column()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and KBlock size (in elements). template <int ElementSize, int KBlock> struct VoltaTensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 64b accesses static int const kAccessSize = 64; // // Static constants // static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; static int const kKBlock = KBlock; private: // // Data members // /// Stride data member. For GEMM, it equals to KBlock x stage. Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE VoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE VoltaTensorOpMultiplicandCrosswise(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static VoltaTensorOpMultiplicandCrosswise packed(TensorCoord const &extent) { return VoltaTensorOpMultiplicandCrosswise(extent[1]); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { // // First, compute c and s of vector within source (in units of vector // accesses) // int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; int vec_strided_idx = coord.strided(); // // Then swizzle // The mapping is like this: // id[1:0]|(id[3]^id[4])|id[2] int vec_strided_within_tile = vec_contiguous_idx & 0x7; int permuted_vec_contiguous = (vec_strided_idx & (~0xF)) + (vec_strided_idx & 0x3) * 4 + (((vec_strided_idx >> 2) ^ ((vec_strided_idx & 0x10) >> 3)) & 0x3); permuted_vec_contiguous ^= ((vec_strided_within_tile >> 1) & 0x3); int permuted_vec_strided = vec_contiguous_idx; // // Compute final element location // int element_contiguous = permuted_vec_contiguous * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess); return element_contiguous + permuted_vec_strided * (stride_[0] * kElementsPerAccess); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[0] * stride_[0]; } }; /// Template mapping a column-major view of pitch-linear memory to /// VoltaTensorOpMultiplicandCrosswise template <int ElementSize, int KBlock> struct ColumnMajorVoltaTensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = VoltaTensorOpMultiplicandCrosswise<ElementSize, KBlock>; /// This layout is optimized for 64b accesses static int const kAccessSize = Base::kAccessSize; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE ColumnMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorVoltaTensorOpMultiplicandCrosswise packed( TensorCoord const &extent) { return ColumnMajorVoltaTensorOpMultiplicandCrosswise(extent.column()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; /// Template mapping a row-major view of pitch-linear memory to /// TensorOpMultiplicandCrosswise template <int ElementSize, int KBlock> struct RowMajorVoltaTensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = VoltaTensorOpMultiplicandCrosswise<ElementSize, KBlock>; /// This layout is optimized for 64b accesses static int const kAccessSize = Base::kAccessSize; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE RowMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorVoltaTensorOpMultiplicandCrosswise packed( TensorCoord const &extent) { return RowMajorVoltaTensorOpMultiplicandCrosswise(extent.row()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; } // namespace layout } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/layout/tensor_op_multiplicand_sm70.h/0
{ "file_path": "cutlass/include/cutlass/layout/tensor_op_multiplicand_sm70.h", "repo_id": "cutlass", "token_count": 9595 }
44
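//
// Illustrative sketch, not part of the CUTLASS headers: the ColumnMajor* and RowMajor*
// wrappers in the file above perform no index arithmetic of their own; they only reorder
// a MatrixCoord into the (contiguous, strided) convention of the underlying pitch-linear
// layout. The stand-in base layout below is a plain pitch-linear mapping
// (offset = contiguous + strided * stride), an assumption used only to keep the example
// self-contained.
//
#include <cstdint>
#include <cstdio>

struct PitchLinearBaseSketch {
  std::int64_t stride;
  std::int64_t operator()(int contiguous, int strided) const {
    return contiguous + static_cast<std::int64_t>(strided) * stride;
  }
};

// Column-major view: rows are the contiguous dimension.
struct ColumnMajorAdaptorSketch {
  PitchLinearBaseSketch base;
  std::int64_t operator()(int row, int column) const { return base(row, column); }
};

// Row-major view: columns are the contiguous dimension, so the coordinate is swapped.
struct RowMajorAdaptorSketch {
  PitchLinearBaseSketch base;
  std::int64_t operator()(int row, int column) const { return base(column, row); }
};

int main() {
  ColumnMajorAdaptorSketch column_major{{128}};
  RowMajorAdaptorSketch row_major{{128}};
  // The same logical coordinate lands at different linear offsets under each view.
  std::printf("column-major (3, 5) -> %lld\n",
              static_cast<long long>(column_major(3, 5)));  // 3 + 5 * 128
  std::printf("row-major    (3, 5) -> %lld\n",
              static_cast<long long>(row_major(3, 5)));     // 5 + 3 * 128
  return 0;
}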
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a structure containing strides, bounds, and a pointer to tensor data. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/coord.h" #include "cutlass/platform/platform.h" #include "cutlass/subbyte_reference.h" namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// /// Default layout function from coordinates in a tensor's index space into the n-D array held /// in memory. /// /// All layout functions must define at least the members shown in IdentityTensorLayout<>. 
template <int Rank> class IdentityTensorLayout { public: /// Logical rank of tensor static int const kRank = Rank; /// Rank of stride vector static int const kStrideRank = Rank; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = Coord<kRank, Index>; /// Stride vector using Stride = Coord<kStrideRank, Index>; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // CUTLASS_HOST_DEVICE IdentityTensorLayout(Stride const &stride = Stride()): stride_(stride) { } /// Returns the offset of a coordinate in linear memory CUTLASS_HOST_DEVICE LongIndex operator()(Coord<Rank> const &coord) const { return coord.dot(stride_); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &size) const { int idx = stride_.max_dim_index(); return stride_[idx] * size[idx]; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /* \brief TensorRef is a template for objects pointing to the start of tensors of arbitrary rank and layout within memory. A TensorRef combines a pointer and a Layout concept Examples: (These examples use helpers for matrix layouts defined in cutlass/layout/matrix.h) 1. Column-major matrix may be represented as a rank=2 tensor: TensorRef<float, layout::ColumnMajor> A(ptr_A, ldm); 2. Row-major matrix may be represented as a rank=2 tensor: TensorRef<float, layout::RowMajor> B(ptr_A, ldm); 3. An interleaved matrix may be represented as a rank=2 tensor: TensorRef<int8_t, layout::ColumnMajorInterleaved<32> > C; 4. A helper exists to define a TensorRef for a contiguous matrix whose layout is not known at compile time. int ldm; // leading dimension layout::Matrix kind; // Could be layout::Matrix::kRowMajor or layout::Matrix::kColumnMajor TensorRef<int, layout::ContiguousMatrix> E(ptr_E, {ldm, kind}); */ template < /// Data type of element stored within tensor (concept: NumericType) typename Element_, /// Defines a mapping from logical coordinate to linear memory (concept: Layout) typename Layout_ > class TensorRef { public: /// Data type of individual access using Element = Element_; /// Mapping function from logical coordinate to linear memory using Layout = Layout_; /// Reference type to an element using Reference = typename platform::conditional< sizeof_bits<Element>::value >= 8, Element &, SubbyteReference<Element> >::type; /// Logical rank of tensor index space static int const kRank = Layout::kRank; /// Index type using Index = typename Layout::Index; /// Long index used for pointer offsets using LongIndex = typename Layout::LongIndex; /// Coordinate in logical tensor space using TensorCoord = typename Layout::TensorCoord; /// Layout's stride vector using Stride = typename Layout::Stride; /// TensorRef to constant data using ConstTensorRef = TensorRef< typename platform::remove_const<Element>::type const, Layout>; /// TensorRef to non-constant data using NonConstTensorRef = TensorRef< typename platform::remove_const<Element>::type, Layout>; /// Require at least rank=1. 
Mathematically, a rank=0 tensor would be considered to be a /// scalar, but degenerate cases such as these are difficult to accommodate without /// extensive C++ metaprogramming or support for zero-length arrays. static_assert(kRank > 0, "Cannot define a zero-rank TensorRef"); private: /// Pointer Element* ptr_; /// Layout object maps logical coordinates to linear offsets Layout layout_; public: // // Methods // /// Constructs a TensorRef with a pointer and layout object. CUTLASS_HOST_DEVICE TensorRef(): ptr_(nullptr) { } /// Constructs a TensorRef with a pointer and layout object. CUTLASS_HOST_DEVICE TensorRef( Element *ptr, ///< pointer to start of tensor Layout const &layout ///< layout object containing stride and mapping function ): ptr_(ptr), layout_(layout) { } /// Converting constructor from TensorRef to non-constant data. template<typename _Magic = int> CUTLASS_HOST_DEVICE TensorRef( NonConstTensorRef const &ref, ///< TensorRef to non-const data ///SFINAE trick to avoid creating a copy-constructor when Element_ is already non-const _Magic magic = (typename platform::enable_if< ! platform::is_same<NonConstTensorRef, TensorRef<Element_, Layout_> >::value, _Magic>::type)0 ): ptr_(ref.data()), layout_(ref.layout()) { } /// Returns a reference to constant-valued tensor. CUTLASS_HOST_DEVICE ConstTensorRef const_ref() const { return ConstTensorRef(ptr_, layout_); } CUTLASS_HOST_DEVICE NonConstTensorRef non_const_ref() const { return NonConstTensorRef(const_cast<typename platform::remove_const<Element>::type *>(ptr_), layout_); } /// Updates only the pointer CUTLASS_HOST_DEVICE void reset(Element* ptr = nullptr) { ptr_ = ptr; } /// Updates the pointer and layout object CUTLASS_HOST_DEVICE void reset(Element* ptr, Layout const &layout) { ptr_ = ptr; layout_ = layout; } /// Returns true if the TensorRef is non-null CUTLASS_HOST_DEVICE bool good() const { return ptr_ != nullptr; } /// Returns the pointer to referenced data CUTLASS_HOST_DEVICE Element * data() const { return ptr_; } /// Returns a reference to the element at a given linear index CUTLASS_HOST_DEVICE Reference data(LongIndex idx) const { return ReferenceFactory<typename platform::remove_const<Element>::type, (sizeof_bits<Element>::value < 8)>::get(ptr_, idx); } /// Returns the layout object CUTLASS_HOST_DEVICE Layout & layout() { return layout_; } /// Returns the layout object CUTLASS_HOST_DEVICE Layout layout() const { return layout_; } /// Returns the layout object's stride vector CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the layout object's stride vector CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Returns the layout object's stride in a given physical dimension CUTLASS_HOST_DEVICE typename Layout::Stride::Index stride(int dim) const { return layout_.stride().at(dim); } /// Returns the layout object's stride in a given physical dimension CUTLASS_HOST_DEVICE typename Layout::Stride::Index & stride(int dim) { return layout_.stride().at(dim); } /// Computes the offset of an index from the origin of the tensor CUTLASS_HOST_DEVICE LongIndex offset(TensorCoord const& coord) const { return layout_(coord); } /// Returns a reference to the element at a given Coord CUTLASS_HOST_DEVICE Reference at(TensorCoord const& coord) const { return data(offset(coord)); } /// Returns a reference to the element at a given Coord CUTLASS_HOST_DEVICE Reference operator[](TensorCoord const& coord) const { return data(offset(coord)); } /// Adds an offset to each pointer 
CUTLASS_HOST_DEVICE TensorRef & add_pointer_offset(LongIndex offset_) { ptr_ += offset_; return *this; } /// Adds an offset to each pointer CUTLASS_HOST_DEVICE TensorRef & add_coord_offset(TensorCoord const &coord) { add_pointer_offset(offset(coord)); return *this; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRef operator+(TensorCoord const& b) const { TensorRef result(*this); result.add_coord_offset(b); return result; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRef & operator+=(TensorCoord const& b) { add_coord_offset(b); return *this; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRef operator-(TensorCoord const& b) const { TensorRef result(*this); result.add_pointer_offset(-offset(b)); return result; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRef & operator-=(TensorCoord const& b) { add_pointer_offset(-offset(b)); return *this; } }; /// Constructs a TensorRef, deducing types from arguments. template < typename Element, typename Layout > CUTLASS_HOST_DEVICE TensorRef<Element, Layout> make_TensorRef(Element *ptr, Layout const &layout) { return TensorRef<Element, Layout>(ptr, layout); } /////////////////////////////////////////////////////////////////////////////////////////////////// // // Partial specializations to handle degenerate and sub-byte cases. // /////////////////////////////////////////////////////////////////////////////////////////////////// template < typename Element, typename Layout > CUTLASS_HOST_DEVICE bool TensorRef_aligned(TensorRef<Element, Layout> const &ref, int alignment) { int const kStrideRank = Layout::kStrideRank; if (reinterpret_cast<uintptr_t>(ref.data()) % alignment) { return false; } CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kStrideRank; ++i) { if (ref.stride(i) % alignment) { return false; } } return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
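//
// Host-side usage sketch for the TensorRef defined above. This is an example, not part
// of the header: it assumes the CUTLASS include directory is on the compiler's include
// path, and the matrix extents (4x8) and stored values are illustrative only. It
// exercises the constructor, offset(), at(), and make_TensorRef() shown in the file.
//
#include <cstdio>
#include <vector>

#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/tensor_ref.h"

int main() {
  int const rows = 4;
  int const columns = 8;
  std::vector<float> storage(static_cast<size_t>(rows) * columns, 0.0f);

  // Row-major view: the leading dimension is the number of columns.
  cutlass::layout::RowMajor layout(columns);
  cutlass::TensorRef<float, cutlass::layout::RowMajor> ref(storage.data(), layout);

  // offset() maps a logical coordinate to a linear index; at() dereferences it.
  cutlass::MatrixCoord coord(2, 3);
  ref.at(coord) = 42.0f;
  std::printf("offset(2, 3) = %lld\n", static_cast<long long>(ref.offset(coord)));  // 2*8 + 3 = 19
  std::printf("stored value = %f\n", storage[static_cast<size_t>(ref.offset(coord))]);

  // make_TensorRef deduces the element and layout types from its arguments.
  auto const_view = cutlass::make_TensorRef(
      static_cast<float const *>(storage.data()), layout);
  std::printf("const read   = %f\n", static_cast<double>(const_view.at(coord)));
  return 0;
}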
cutlass/include/cutlass/tensor_ref.h/0
{ "file_path": "cutlass/include/cutlass/tensor_ref.h", "repo_id": "cutlass", "token_count": 3815 }
45
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief Templates calculating the addresses and predicates for loads of tiles from
      pitch-linear rank=2 tensors.

      This iterator uses masks to guard out-of-bounds accesses. The first tile this
      iterator visits may be partial, and the remaining tiles are complete, so the
      predicates only need to be computed twice: once before the first tile and once for
      the remaining full tiles, which can share the same predicates.

      A precomputed "Params" object minimizes the amount of state that must be stored in
      registers, and integer addition is used to advance the pointer through memory.
*/ #pragma once #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/permute.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedTileAccessIteratorPredicates /// template <typename Shape_, typename Element_, typename Layout_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIteratorPredicates { public: using Shape = Shape_; using Element = Element_; using Layout = Layout_; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = typename Layout::TensorCoord; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); static int const kPredicatesPerByte = 4; static int const kPredicatesPerWord = 4 * kPredicatesPerByte; static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector; /// Number of 32b words containing predicates static int const kPredicateByteCount = (kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte; static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; static_assert(kPredicateWordCount <= 4, "Too many predicates."); /// Predicate vector stores mask to guard accesses using Mask = Array<uint32_t, kPredicateWordCount>; // private: /// Guard predicates uint32_t predicates_[kPredicateWordCount]; /// Size of tensor TensorCoord extent_; /// Initial offset for each thread TensorCoord thread_offset_; /// Offset to the first steady-state tile TensorCoord residue_offset_; /// Iteration along vectors implied by the thread map int iteration_vector_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; public: /// Computes predicates based on internally tracked per-thread offset. 
CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0u; } CUTLASS_PRAGMA_UNROLL for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) { int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector); int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector); int c = access_residual / kAccessesPerVector; int v = access_residual % kAccessesPerVector; TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements, s * ThreadMap::Delta::kStrided); TensorCoord coord = thread_offset_ + iteration_coord; bool guard; if (is_steady_state) { if (kAdvanceRank == 0) { guard = (coord.strided() < extent.strided()); } else { guard = (coord.contiguous() < extent.contiguous()); } } else { guard = (coord.strided() < extent.strided() && coord.contiguous() < extent.contiguous()); } int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); } } CUTLASS_HOST_DEVICE void set_predicates(int thread_id, TensorCoord const &threadblock_offset) { TensorCoord residue_extent; if (kAdvanceRank) { typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided; if (!residue_size) { residue_size = Shape::kStrided; } residue_offset_ = make_Coord(0, residue_size); residue_extent = make_Coord( extent_.contiguous(), min(threadblock_offset.strided() + residue_size, extent_.strided()) ); } else { typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous; if (!residue_size) { residue_size = Shape::kContiguous; } residue_offset_ = make_Coord(residue_size, 0); residue_extent = make_Coord( min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size), extent_.strided() ); } // Per-thread offset in logical coordinates of tensor thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id); compute_predicates_(residue_extent, false); set_iteration_index(0); } /// Default constructor PredicatedTileAccessIteratorPredicates() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorPredicates( /// Extent of tensor TensorCoord extent) : extent_(extent) { } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorPredicates &operator++() { return *this; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = enable ? 
0u : predicates_[i]; } } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0xffffffff; } } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = mask[i]; } } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { mask[i] = predicates_[i]; } } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() const { int pred_idx = iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous); int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; return pred; } }; //////////////////////////////////////////////////////////////////////////////// /// PredicatedTileAccessIterator /// template <typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, typename AccessType, bool Gather = false, typename PermuteLayout = layout::NoPermute> class PredicatedTileAccessIterator; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for pitch-linear data. /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather, typename PermuteLayout> class PredicatedTileAccessIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, AccessType_, Gather, PermuteLayout> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates< Shape, Element, Layout, AdvanceRank, ThreadMap, AccessType>; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); static bool constexpr Permute = !platform::is_same<PermuteLayout, layout::NoPermute>::value && !platform::is_same<PermuteLayout, layout::InversePermute<layout::NoPermute>>::value; using Mask = typename UnderlyingPredicates::Mask; /// Uses a non-template class struct Params : PredicatedTileAccessIteratorParams { using Base = PredicatedTileAccessIteratorParams; /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : Base(layout.stride(0), MakePredicatedTileAccessIteratorDesc<Shape, 
Element, Layout, kAdvanceRank, ThreadMap>()() ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; private: // // Data members // UnderlyingPredicates the_predicates; /// Parameters object with precomputed internal state Params params_; /// Internal pointer to first access of tile BytePointer pointer_; /// Used for out-of-order visitation bool is_residue_tile_; /// Below is used when Gather is turned on. We need to record strided_offset /// and contiguous_offset separated to compute the offset by using /// /// offset = contiguous_offset + indices[strided_offset] /// Gather indices int const *indices_; /// Function to perform layout permutation and offset computation PermuteLayout permute_layout_; /// Tracks thread's coordinate offset in the matrix for current tile. /// This is only used in the following cases: /// - when Gather is true, strided coordinate needed to access indices (contiguous offset is tracked via pointer_) /// - when Permute is true, both coordinates are neeeded as input into permutation function (pointer_ is fixed) TensorCoord coord_offset_; private: /// Computes predicates based on internally tracked per-thread offset. CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { the_predicates.compute_predicates_(extent, is_steady_state); } public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset, /// Gather indices int const *indices = nullptr) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), the_predicates(extent), is_residue_tile_(true), indices_(indices), permute_layout_(TensorCoord(extent.contiguous(), extent.strided()), params.stride_) { the_predicates.set_predicates(thread_id, threadblock_offset); if (Gather) { assert(indices_); } // update internal pointers Layout layout(params_.stride_); if (!Gather && !Permute) { add_pointer_offset(layout(the_predicates.thread_offset_)); } else { coord_offset_ = the_predicates.thread_offset_; if (!Permute) { add_pointer_offset(layout(make_Coord(coord_offset_.contiguous(), 0))); } } } /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += sizeof_bits<Element>::value * pointer_offset / 8; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE void 
add_tile_offset( TensorCoord const &tile_offset) { if (is_residue_tile_) { the_predicates.thread_offset_ += the_predicates.residue_offset_; the_predicates.compute_predicates_(the_predicates.extent_, true); Layout layout(params_.stride_); if (!Gather && !Permute) { add_pointer_offset(layout(the_predicates.residue_offset_)); if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1); pointer_ += Shape::kContiguous * tile_offset.contiguous() * sizeof_bits<Element>::value / 8; } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1); pointer_ += Shape::kStrided * tile_offset.strided() * sizeof_bits<Element>::value / 8; } } else { coord_offset_.strided() = the_predicates.thread_offset_.strided() + Shape::kStrided * (tile_offset.strided() - kAdvanceRank); if (!Permute) { add_pointer_offset(layout(make_Coord(the_predicates.residue_offset_.contiguous(), 0))); add_pointer_offset(Shape::kContiguous * (tile_offset.contiguous() - (1 - kAdvanceRank))); } else { coord_offset_.contiguous() = the_predicates.thread_offset_.contiguous() + Shape::kContiguous * (tile_offset.contiguous() - (1 - kAdvanceRank)); } } } else { if (!Gather && !Permute) { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided()); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous()); pointer_ += Shape::kStrided * tile_offset.strided(); } } else { coord_offset_.strided() += Shape::kStrided * tile_offset.strided(); if (!Permute) { add_pointer_offset(Shape::kContiguous * tile_offset.contiguous()); } else { coord_offset_.contiguous() += Shape::kContiguous * tile_offset.contiguous(); } } } is_residue_tile_ = false; } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { if (Gather || Permute) { if (!valid()) { return nullptr; } Index coord_contig = (Permute ? coord_offset_.contiguous() : 0) + the_predicates.iteration_contiguous_ * ThreadMap::Delta::kContiguous + the_predicates.iteration_vector_ * AccessType::kElements; Index coord_strided = coord_offset_.strided() + the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided; if (Gather) { coord_strided = indices_[coord_strided]; } LongIndex offset = Permute ? permute_layout_(TensorCoord(coord_contig, coord_strided)) : (coord_strided * LongIndex(params_.stride_) + coord_contig); return reinterpret_cast<AccessType *>(pointer_ + OffsetBytes<Element>(offset)); } return reinterpret_cast<AccessType *>( pointer_ + the_predicates.iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + the_predicates.iteration_vector_; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { the_predicates.operator++(); ++the_predicates.iteration_vector_; if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; } the_predicates.iteration_vector_ = 0; ++the_predicates.iteration_contiguous_; if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } // Enter here only if (iteration_contiguous_ == ThreadMap::Iteration::kContiguous) the_predicates.iteration_contiguous_ = 0; ++the_predicates.iteration_strided_; if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { if (!Gather && !Permute) { pointer_ += params_.inc_strided_; } return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. 
the_predicates.iteration_strided_ = 0; if (!Gather && !Permute) { // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, this // subtraction as well as the subsequent integer addition are both elided by // the compiler. pointer_ -= params_.inc_advance_; } return *this; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { the_predicates.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { the_predicates.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { the_predicates.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() const { return the_predicates.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for column-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather, typename PermuteLayout> class PredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessType_, Gather, PermuteLayout> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType, Gather, PermuteLayout>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){}; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()), indices) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for row-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather, typename PermuteLayout> class PredicatedTileAccessIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessType_, Gather, PermuteLayout> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType, Gather, PermuteLayout>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){}; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, /// Gather indices int const *indices = nullptr) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()), indices) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for affine rank 2 data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRankN<2>, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRankN<2>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates< Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap, AccessType>; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingPredicates::Mask; /// Parameters object is precomputed state and is host-constructible class Params { public: friend PredicatedTileAccessIterator; private: /// stride of pitch-linear layout (units of Element) Coord<Layout::kStrideRank, Layout::LongIndex> stride_; /// amount (in byte) to increment pointer to move to next access along /// contiguous dimension LongIndex inc_contiguous_; /// amount (in byte) to increment pointer from first access of current /// contiguous dimension to first access of next one. LongIndex inc_strided_; /// amount (in byte) to increment pointer from last access of current /// contiguous dimension to first access of next one. 
LongIndex inc_next_strided_; /// amount (in byte) to increment pointer from last access to first access /// of next tile LongIndex inc_next_; /// amount (in byte) to increment pointer from first access of current tile /// to first access of next tile LongIndex inc_advance_; public: // Default ctor CUTLASS_HOST_DEVICE Params(): stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : stride_({layout.stride(0), layout.stride(1)}) { inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) * sizeof_bits<Element>::value / 8; inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) * sizeof_bits<Element>::value / 8; inc_next_strided_ = inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_; if (kAdvanceRank) { // advance along strided dimension inc_advance_ = Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits<Element>::value / 8; } else { // advance along contiguous dimension inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8; } inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_; }; }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; // // Data members // /// Parameters object with precomputed internal state Params params_; /// Internal pointer to first access of tile BytePointer pointer_; UnderlyingPredicates the_predicates; /// Used for out-of-order visitation bool is_residue_tile_; private: /// Computes predicates based on internally tracked per-thread offset. CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { the_predicates.compute_predicates_(extent, is_steady_state); } public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), the_predicates(extent), is_residue_tile_(true) { the_predicates.set_predicates(thread_id, threadblock_offset); // update internal pointers Layout layout(params_.stride_); add_pointer_offset(layout(the_predicates.thread_offset_)); } /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } /// Adds a pointer 
offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += sizeof_bits<Element>::value * pointer_offset / 8; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { if (is_residue_tile_) { the_predicates.thread_offset_ += the_predicates.residue_offset_; Layout layout(params_.stride_); add_pointer_offset(layout(the_predicates.residue_offset_)); the_predicates.compute_predicates_(the_predicates.extent_, true); if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1] - 1); pointer_ += Shape::kContiguous * tile_offset[0]; } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0] - 1); pointer_ += Shape::kStrided * tile_offset[1]; } } else { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]); pointer_ += Shape::kContiguous * tile_offset[0]; } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]); pointer_ += Shape::kStrided * tile_offset[1]; } } is_residue_tile_ = false; } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(pointer_) + the_predicates.iteration_vector_; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { the_predicates.operator++(); ++the_predicates.iteration_vector_; if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; } the_predicates.iteration_vector_ = 0; ++the_predicates.iteration_contiguous_; if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { pointer_ += params_.inc_contiguous_; return *this; } // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) the_predicates.iteration_contiguous_ = 0; ++the_predicates.iteration_strided_; if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { pointer_ += params_.inc_next_strided_; return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. the_predicates.iteration_strided_ = 0; // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, this // subtraction as well as the subsequent integer addition are both elided by // the compiler. pointer_ -= params_.inc_advance_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { the_predicates.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { the_predicates.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { the_predicates.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return the_predicates.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for affine rank 2 column-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRank2ColumnMajor, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRank2ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; // Map to the underlying AffineRankN<2> layout using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::AffineRankN<2>, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given an AffineRankN<2> tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){}; }; private: // // Data members // /// Underlying AffineRankN<2> tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset(make_Coord(tile_offset.row(), tile_offset.column())); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for affine rank-2 row-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRank2RowMajor, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRank2RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; // Map to the underlying AffineRankN<2> layout using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::AffineRankN<2>, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given an AffineRankN<2> tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){}; }; private: // // Data members // /// Underlying AffineRankN<2> tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset(make_Coord(tile_offset.column(), tile_offset.row())); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for column-major interleaved data. /// It is mapped to the congruous layout. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, int InterleavedK> class PredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::ColumnMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row() * kInterleavedK, extent.column() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.row() * kInterleavedK, threadblock_offset.column() / kInterleavedK)) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for row-major interleaved data. // It is mapped to the congruous layout. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, int InterleavedK> class PredicatedTileAccessIterator<Shape_, Element_, layout::RowMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::RowMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column() * kInterleavedK, extent.row() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.column() * kInterleavedK, threadblock_offset.row() / kInterleavedK)) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
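// Editor's illustrative sketch (not part of the original header): the affine and
// interleaved specializations above do no addressing work of their own; they only
// re-express logical shapes and coordinates so that the underlying pitch-linear
// (or AffineRankN<2>) iterator can be reused. The two helpers below spell out the
// extent mapping performed by the ColumnMajorInterleaved<K> and
// RowMajorInterleaved<K> constructors shown above.

#include "cutlass/layout/pitch_linear.h"

template <int InterleavedK>
CUTLASS_HOST_DEVICE
cutlass::layout::PitchLinearCoord
column_major_interleaved_as_pitch_linear(int rows, int columns) {
  // Mirrors the ColumnMajorInterleaved<K> specialization: the contiguous extent
  // grows by a factor of K while the strided extent shrinks by a factor of K.
  return cutlass::layout::PitchLinearCoord(rows * InterleavedK, columns / InterleavedK);
}

template <int InterleavedK>
CUTLASS_HOST_DEVICE
cutlass::layout::PitchLinearCoord
row_major_interleaved_as_pitch_linear(int rows, int columns) {
  // Mirrors the RowMajorInterleaved<K> specialization: rows and columns swap roles.
  return cutlass::layout::PitchLinearCoord(columns * InterleavedK, rows / InterleavedK);
}

// Example: a 128 x 64 ColumnMajorInterleaved<32> extent is presented to the
// underlying pitch-linear iterator as (128 * 32, 64 / 32) = (4096, 2).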
cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator.h
{ "file_path": "cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator.h", "repo_id": "cutlass", "token_count": 25112 }
46
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile first, with the objective of minimizing predicate mask updates during steady-state operation. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "regular_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, int Alignment = sizeof_bits<Element>::value * ThreadMap::kElementsPerAccess / 8 > class RegularTileIterator2dThreadTile; /// Regular tile iterator specialized for pitch-linear + 2d thread-tiled threadmapping template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment > class RegularTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Alignment> { public: using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>; static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, "Advance rank may only be along the contiguous or strided dimensions."); private: // // Types // using AccessType = AlignedArray<Element, ThreadMap::ThreadAccessShape::kCount, kAlignment>; // // Data members // /// Pointer to memory uint8_t *pointer_; /// Stride quantity StrideIndex stride_; /// Amount to increment pointer along strided dimension LongIndex increment_strided_; /// Amount to advance pointer between tiles LongIndex increment_advance_; public: CUTLASS_DEVICE RegularTileIterator2dThreadTile(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { } CUTLASS_DEVICE RegularTileIterator2dThreadTile( TensorRef const &ref, int thread_idx, int interleave ){ TensorCoord t = ThreadMap::initial_offset(thread_idx); long int offset = t[0] * interleave + t[1] * ref.stride()[0]/interleave; pointer_ = reinterpret_cast<uint8_t *>(ref.data() + offset); stride_ = ref.stride()[0] / interleave; increment_strided_ = (ref.stride()[0] * sizeof_bits<Element>::value / 8) * ThreadMap::Delta::kStrided / interleave; increment_advance_ = (kAdvanceRank == 0 ? 
Shape::kContiguous * sizeof_bits<Element>::value / 8 : Shape::kStrided * (ref.stride()[0] * sizeof_bits<Element>::value / 8) / interleave); } /// Loads a fragment CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int idx = c + s * ThreadMap::Iterations::kContiguous; frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided]; } if (s + 1 < ThreadMap::Iterations::kStrided) { byte_pointer += increment_strided_; } } } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag, TensorCoord const & tile_offset) { load_with_pointer_offset( frag, tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess + tile_offset.strided() * Shape::kStrided * stride_ ); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Stores a fragment CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag); uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int idx = c + s * ThreadMap::Iterations::kContiguous; access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided] = frag_ptr[idx]; } if (s + 1 < ThreadMap::Iterations::kStrided) { byte_pointer += increment_strided_; } } } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag, TensorCoord const & tile_offset) { store_with_pointer_offset( frag, tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_ ); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator++() { pointer_ += increment_advance_; return *this; } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator--() { pointer_ -= increment_advance_; return *this; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { int offset = sizeof_bits<Element>::value * (coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8; add_pointer_offset(offset); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment > class RegularTileIterator2dThreadTile<Shape_, Element_, layout::RowMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> { public: using Shape = Shape_; using Element = 
Element_; using Layout = layout::RowMajorInterleaved<4>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>; using Underlying = RegularTileIterator2dThreadTile< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, kAlignment >; static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, "Advance rank may only be along the row or column dimensions."); private: Underlying iterator_; public: CUTLASS_DEVICE RegularTileIterator2dThreadTile() { } CUTLASS_DEVICE RegularTileIterator2dThreadTile( TensorRef const &ref, int thread_idx ): iterator_({ref.data(), ref.stride()}, thread_idx, 4) { } /// Loads a fragment CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag, TensorCoord const & tile_offset) { iterator_.load_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()}); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag) { iterator_.load_with_pointer_offset(frag, 0); } /// Stores a fragment CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag, TensorCoord const & tile_offset) { iterator_.store_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()}); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag) { iterator_.store_with_pointer_offset(frag, 0); } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator++() { ++iterator_; return *this; } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator--() { --iterator_; return *this; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment > class RegularTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> { public: using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajorInterleaved<4>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>; using PitchLinearThreadMap = PitchLinearStripminedThreadMap< 
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, ThreadMap::kThreads, ThreadMap::ThreadAccessShape::kCount >; using Underlying = RegularTileIterator2dThreadTile< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap >; static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, "Advance rank may only be along the row or column dimensions."); private: Underlying iterator_; public: CUTLASS_DEVICE RegularTileIterator2dThreadTile() { } CUTLASS_DEVICE RegularTileIterator2dThreadTile( TensorRef const &ref, int thread_idx ): iterator_({ref.data(), ref.stride()}, thread_idx, 4) { } /// Loads a fragment CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag, TensorCoord const & tile_offset) { iterator_.load_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()}); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag) { iterator_.load_with_pointer_offset(frag, 0); } /// Stores a fragment CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag, TensorCoord const & tile_offset) { iterator_.store_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()}); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag) { iterator_.store_with_pointer_offset(frag, 0); } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator++() { ++iterator_; return *this; } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator--() { --iterator_; return *this; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass
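// Editor's usage sketch (not part of the original header), assuming a concrete
// RegularTileIterator2dThreadTile instantiation named Iterator has been chosen
// elsewhere (the Shape, Element, ThreadMap, and Alignment parameters depend on
// the kernel). It exercises only the members defined above: construction from a
// TensorRef (typically referencing a tile staged in shared memory), fragment
// load/store, and advancing by one tile along the advance rank.

template <typename Iterator>
CUTLASS_DEVICE
void regular_tile_iterator_2dthreadtile_roundtrip(
    typename Iterator::TensorRef ref,   // tile storage, e.g. in shared memory
    int thread_idx,                     // linear thread index within the threadblock
    int tiles) {                        // number of tiles to visit

  // The interleaved specializations take (ref, thread_idx); the pitch-linear
  // base specialization additionally takes the interleave factor.
  Iterator iter(ref, thread_idx);

  for (int t = 0; t < tiles; ++t, ++iter) {
    typename Iterator::Fragment frag;
    iter.load(frag);     // gather this thread's portion of the current tile
    // ... operate on frag in registers ...
    iter.store(frag);    // scatter it back to the same tile
  }
}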
cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h
{ "file_path": "cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h", "repo_id": "cutlass", "token_count": 5171 }
47
# CuTe Tensor algorithms This section summarizes the interfaces and implementations of common numerical algorithms performed on `Tensor`s. The implementation of these algorithms may be found in the [include/cute/algorithm/](../../../include/cute/algorithm/) directory. ## `copy` CuTe's `copy` algorithm copies the elements of a source `Tensor` into the elements of a destination `Tensor`. The various overloads of `copy` can be found in [`include/cute/algorithm/copy.hpp`](../../../include/cute/algorithm/copy.hpp). ### Interface and specialization opportunities A `Tensor` encapsulates the data type, data location, and possibly also the shape and stride of the tensor at compile time. As a result, `copy` can and does dispatch, based on the types of its arguments, to use any of various synchronous or asynchronous hardware copy instructions. The `copy` algorithm has two main overloads. The first just takes the source `Tensor` and the destination `Tensor`. ```c++ template <class SrcEngine, class SrcLayout, class DstEngine, class DstLayout> CUTE_HOST_DEVICE void copy(Tensor<SrcEngine, SrcLayout> const& src, Tensor<DstEngine, DstLayout> & dst); ``` The second takes those two parameters, plus a `Copy_Atom`. ```c++ template <class... CopyArgs, class SrcEngine, class SrcLayout, class DstEngine, class DstLayout> CUTE_HOST_DEVICE void copy(Copy_Atom<CopyArgs...> const& copy_atom, Tensor<SrcEngine, SrcLayout> const& src, Tensor<DstEngine, DstLayout> & dst); ``` The two-parameter `copy` overload picks a default implementation based only on the types of the two `Tensor` parameters. The `Copy_Atom` overload lets callers override that default by specifying a nondefault `copy` implementation. ### Parallelism and synchronization depend on parameter types Either the default implementation or the implementation selected by a `Copy_Atom` overload may use none or all available parallelism, and may have a variety of synchronization semantics. The behavior depends on `copy`'s parameter types. Users are expected to figure this out based on their knowledge of the architecture on which they are running. (Developers often write a custom optimized kernel for each GPU architecture.) The `copy` algorithm may be sequential per thread, or it may be parallel across some collection of threads (e.g., a block or cluster). If `copy` is parallel, then the collection of participating threads may need synchronization before any thread in the collection may assume that the copy operation has completed. For example, if the participating threads form a thread block, then users must invoke `__syncthreads()` or the Cooperative Groups equivalent before they may use the results of `copy`. The `copy` algorithm may use asynchronous copy instructions, such as `cp.async`, or its C++ interface `memcpy_async`. In that case, users will need to perform the additional synchronization appropriate to that underlying implementation before they may use the results of the `copy` algorithm. [The CuTe GEMM tutorial example](../../../examples/cute/tutorial/) shows one such synchronization method. More optimized GEMM implementations use pipelining techniques to overlap asynchronous `copy` operations with other useful work. ### A generic copy implementation A simple example of a generic `copy` implementation for any two `Tensor`s looks like this. 
```c++ template <class TA, class ALayout, class TB, class BLayout> CUTE_HOST_DEVICE void copy(Tensor<TA, ALayout> const& src, // Any logical shape Tensor<TB, BLayout> & dst) // Any logical shape { for (int i = 0; i < size(src); ++i) { dst(i) = src(i); } } ``` This generic `copy` algorithm addresses both `Tensor`s with 1-D logical coordinates, thus traversing both `Tensor`s in a logical column-major order. Some reasonable architecture-independent optimizations would include the following. 1. If the two `Tensor`s have known memory spaces with optimized access instructions (like `cp.async`), then dispatch to the custom instruction. 2. The two `Tensor`s have static layouts and it can be proven that element vectorization is valid -- for example, four `ld.global.b32`s can be combined into a single `ld.global.b128` -- then vectorize the source and destinations tensors. 3. If possible, validate that the copy instruction to be used is appropriate for the source and destination tensors. CuTe's optimized copy implementations can do all of these. ## `copy_if` CuTe's `copy_if` algorithm lives in the same header as `copy`, [`include/cute/algorithm/copy.hpp`](../../../include/cute/algorithm/copy.hpp). The algorithm takes source and destination `Tensor` parameters like `copy`, but it also takes a "predication `Tensor`" with the same shape as the input and output. Elements of the source `Tensor` are only copied if the corresponding predication `Tensor` element is nonzero. For details on why and how to use `copy_if`, please refer to the ["predication" section of the tutorial](./0y_predication.md). ## `gemm` ### What `gemm` computes The `gemm` algorithm takes three `Tensor`s, A, B, and C. What it does depends on the number of modes that its `Tensor` parameters have. We express these modes using letters. * V indicates a "vector," a mode of independent elements. * M and N indicate the number of rows resp. columns of the matrix result C of the BLAS's GEMM routine. * K indicates the "reduction mode" of GEMM, that is, the mode along which GEMM sums. Please see the [GEMM tutorial](./0x_gemm_tutorial.md) for details. We list the modes of the input `Tensor`s A and B, and the output `Tensor` C, using a notation `(...) x (...) => (...)`. The two leftmost `(...)` describe A and B (in that order), and the `(...)` to the right of the `=>` describes C. 1. `(V) x (V) => (V)`. The element-wise product of vectors: C<sub>v</sub> += A<sub>v</sub> B<sub>v</sub>. Dispatches to FMA or MMA. 2. `(M) x (N) => (M,N)`. The outer product of vectors: C<sub>mn</sub> += A<sub>m</sub> B_<sub>n</sub>. Dispatches to (4) with V=1. 3. `(M,K) x (N,K) => (M,N)`. The product of matrices: C<sub>mn</sub> += A<sub>mk</sub> B<sub>nk</sub>. Dispatches to (2) for each K. 4. `(V,M) x (V,N) => (V,M,N)`. The batched outer product of vectors: C<sub>vmn</sub> += A<sub>vm</sub> B<sub>vn</sub>. Optimizes for register reuse and dispatches to (1) for each M, N. 5. `(V,M,K) x (V,N,K) => (V,M,N)`. The batched product of matrices: C<sub>vmn</sub> += A<sub>vmk</sub> B<sub>vnk</sub>. Dispatches to (4) for each K. Please refer to the [GEMM tutorial](./0x_gemm_tutorial.md) for an overview of CuTe's convention for ordering the modes. For example, if K appears, it always appears rightmost ("outermost"). If V appears, it always appears leftmost ("innermost"). ### Dispatch to optimized implementations Just like with `copy`, CuTe's implementations of `gemm` uses its `Tensor` arguments' types to dispatch to an appropriately optimized implementation. 
Also like `copy`, `gemm` takes an optional `MMA_Atom` parameter that lets callers override the default `FMA` instruction that CuTe would select based on the `Tensor` arguments' types. For more information on `MMA_Atom` and on specialization of `gemm` for different architectures, please refer to the [MMA section of the tutorial](./0t_mma_atom.md). ## `axpby` The `axpby` algorithm lives in the header file [`include/cute/algorithm/axpby.hpp`](../../../include/cute/algorithm/axpby.hpp). It assigns to $y$ the result of $\alpha x + \beta y$, where $\alpha$ and $\beta$ are scalars and $x$ and $y$ are `Tensor`s. The name stands for "Alpha times X Plus Beta times Y," and is a generalization of the original BLAS "AXPY" routine ("Alpha times X Plus Y"). ## `fill` The `fill` algorithm lives in the header file [`include/cute/algorithm/fill.hpp`](../../../include/cute/algorithm/fill.hpp). It overwrites the elements of its `Tensor` output argument with a given scalar value. ## `clear` The `clear` algorithm lives in the header file [`include/cute/algorithm/clear.hpp`](../../../include/cute/algorithm/clear.hpp). It overwrites the elements of its `Tensor` output argument with zeros. ## Other algorithms CuTe provides other algorithms. Their header files can be found in the [`include/cute/algorithm`](../../../include/cute/algorithm) directory.
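## Putting the algorithms together

To see how these algorithms compose, the following host-side sketch builds a few small `Tensor`s over plain arrays and calls each algorithm described above. It is illustrative rather than normative: the `copy` and `gemm` calls follow the conventions documented on this page, while the exact argument orders shown for `fill` and `axpby` are inferred from their descriptions and should be confirmed against the headers.

```c++
#include <cute/tensor.hpp>

using namespace cute;

void algorithms_sketch() {
  float a[4 * 2], b[4 * 2], c[4 * 4], y[4 * 4];

  // Compact, compile-time-shaped tensors over plain host arrays.
  Tensor A = make_tensor(&a[0], make_shape(Int<4>{}, Int<2>{}));  // (M,K)
  Tensor B = make_tensor(&b[0], make_shape(Int<4>{}, Int<2>{}));  // (N,K)
  Tensor C = make_tensor(&c[0], make_shape(Int<4>{}, Int<4>{}));  // (M,N)
  Tensor Y = make_tensor(&y[0], make_shape(Int<4>{}, Int<4>{}));  // (M,N)

  fill(A, 1.0f);            // A(m,k) = 1
  fill(B, 2.0f);            // B(n,k) = 2
  clear(C);                 // C(m,n) = 0
  gemm(A, B, C);            // mode (3): C(m,n) += sum_k A(m,k) * B(n,k) = 4
  copy(C, Y);               // Y(m,n) = C(m,n) = 4
  axpby(2.0f, C, 1.0f, Y);  // Y = 2*C + 1*Y = 12
}
```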
cutlass/media/docs/cute/04_algorithms.md
{ "file_path": "cutlass/media/docs/cute/04_algorithms.md", "repo_id": "cutlass", "token_count": 2578 }
48
# Synchronization primitives ## Overview of CUDA's synchronization methods The CUDA programming model provides 3 abstractions: * hierarchical parallelism -- that is, parallel threads grouped into hierarchical units such as blocks and clusters; * shared memory, through which parallel threads that are in the same hierarchical unit can communicate; and * synchronization methods for threads. These abstractions help developers extract both fine-grained and coarse-grained parallelism, by making it possible for them to subdivide problems into independent components, and to insert synchronization at appropriate points. Over the years CUDA has introduced several synchronization primitives that operate at different levels of the hierarchy. These include * [thread block - level](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions) synchronization (e.g., `__syncthreads()`); * [warp-level](https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/) synchronization (e.g., `__syncwarp()`); and * [thread-level](https://docs.nvidia.com/cuda/cuda-c-programming-guide/#memory-fence-functions) fence operations. As an extension to this, starting with the Hopper architecture, CUDA added the following improvements: * [thread block clusters](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#thread-block-clusters) -- a new level in the thread hierarchy representing a group of thread blocks that can coordinate and share data; * synchronization instructions for a thread block cluster and threads within a cluster scope. ## CUTLASS's abstractions for Hopper features CUTLASS now includes abstractions for the following features introduced in Hopper. 1. Thread block cluster - level synchronization and query [APIs](/include/cute/arch/cluster_sm90.hpp) 2. Abstractions for new [barrier instructions](/include/cutlass/arch/barrier.h) which help with efficient synchronization of threads within a thread block cluster. ### Asynchronous pipelines In order to write a performant GEMM Kernel, software pipelining is critical to hide the latency of global memory loads. (Please refer to the [Efficient GEMM](/media/docs/efficient_gemm.md#pipelining) document.) Different threads or groups of threads may have different roles in the pipeline. Some are "producers" that load data or perform computations to satisfy other threads' input data dependencies. The same or different threads may be "consumers" that do other work with those input data dependencies, once they are satisfied. Starting with the Hopper architecture, the presence of hardware-accelerated synchronization instructions make it possible for "producer" and "consumer" threads to communicate with each other efficiently about their data dependencies. Implementing a persistent GEMM algorithm calls for managing dozens of different kinds of asynchronously executing operations that synchronize using multiple barriers organized as a circular list. This complexity is too much for human programmers to manage by hand. As a result, we have developed [asynchronous Pipeline classes](/include/cutlass/pipeline/). These classes help developers orchestrate a pipeline of asynchronous producer and consumer threads, without needing to worry about lower-level hardware details. These classes serve a similar function as the various [pipeline abstractions](https://nvidia.github.io/libcudacxx/extended_api/synchronization_primitives/pipeline.html) in libcu++. 
#### Pipeline methods ##### Producer acquire The `producer_acquire` method is to be used by asynchronous producer threads before issuing other instructions associated with a particular pipeline stage (e.g., copy or write). This is a blocking instruction which blocks further execution of consumer threads unless the particular stage waiting to be acquired is released by a consumer. We say that a pipeline at its start is "empty" if producer threads are free to produce and do not need to wait for a consumer release -- that is, if an acquire operation is expected to succeed. If the pipeline at its start is empty, then we can either skip performing producer acquire operations during the first pass through the pipeline stages, or use the `make_producer_start_state` method. The latter ensures that the acquire operation will succeed at the start of a pipeline. ##### Producer commit The `producer_commit` method is to be issued by asynchronous producer threads after the instructions associated with a particular stage (e.g., shared memory writes) have completed, in order to notify the waiting asynchronous consumer threads. This is a nonblocking instruction. This API may result in a No-Op in some cases, if the producer instructions also update the barrier stage associated automatically (e.g., TMA_based producer threads using the `PipelineTmaAsync ` class). ##### Consumer wait The `consumer_wait` method is to be used by consumer threads before consuming data from a particular pipeline stage which is expected to be produced by producer threads. This is a blocking instruction. That is, until the producer threads have committed to a particular stage, this instruction is expected to block further execution of consumer threads. ##### Consumer release The `consumer_release` method is to be used by consumer threads to signal waiting producer threads that they have finished consuming data associated with a particular stage of the pipeline. This is a nonblocking instruction. #### Pipeline example ```c++ // 4-stage Pipeline static constexpr int NumStages = 4; using MainloopPipeline = typename cutlass::PipelineAsync<NumStages>; using PipelineState = typename cutlass::PipelineState<NumStages>; // 2 producer threads and 1 consumer thread typename MainloopPipeline::Params params; params.producer_arv_count = 2; params.consumer_arv_count = 1; MainloopPipeline pipeline(shared_storage.storage, params); // Producer threads if (thread_idx == 0 or thread_idx == 1) { PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>(); for ( ; iter > 0; --iter) { pipeline.producer_acquire(smem_pipe_write); // Producer ops // If any memory operations are involved, then we also need // to guarantee that writes are completed and visible to consumer(s). pipeline.producer_commit(smem_pipe_write); ++smem_pipe_write; } } else if (thread_idx == 2) { PipelineState smem_pipe_read; for (; iter > 0; --iter) { pipeline.consumer_wait(smem_pipe_read); // Consumer ops pipeline.consumer_release(smem_pipe_read); ++smem_pipe_read; } } ``` In this example, we create an instance of the asynchronous pipeline class `PipelineSync`, and then synchronize among 3 asynchronously executing threads: 2 producer threads and 1 consumer thread. Please note that this is a basic example. There are different versions possible, depending on what the producer and consumer threads are doing. Please refer to our [unit tests](/test/unit/pipeline) and the other [pipeline classes](/include/cutlass/pipeline/pipeline.hpp) for more details. 
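#### Warp-specialized variation

The example above assigns pipeline roles to individual threads. The sketch below is a small variation (ours, not taken from the CUTLASS unit tests) that assigns roles per warp instead, which is closer to how warp-specialized mainloops use these classes. It relies only on the `PipelineAsync` API already shown; the arrival counts are sized so that every thread of the producer and consumer warps participates.

```c++
// 4-stage pipeline; warp 0 produces, warp 1 consumes
static constexpr int NumStages = 4;
using MainloopPipeline = cutlass::PipelineAsync<NumStages>;
using PipelineState = cutlass::PipelineState<NumStages>;

typename MainloopPipeline::Params params;
params.producer_arv_count = 32;   // every thread of the producer warp arrives
params.consumer_arv_count = 32;   // every thread of the consumer warp arrives
MainloopPipeline pipeline(shared_storage.storage, params);

int warp_idx = threadIdx.x / 32;

if (warp_idx == 0) {
  // Producer warp
  PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>();
  for ( ; iter > 0; --iter) {
    pipeline.producer_acquire(smem_pipe_write);

    // Producer ops, e.g. shared memory writes for this stage

    pipeline.producer_commit(smem_pipe_write);
    ++smem_pipe_write;
  }
}
else if (warp_idx == 1) {
  // Consumer warp
  PipelineState smem_pipe_read;
  for ( ; iter > 0; --iter) {
    pipeline.consumer_wait(smem_pipe_read);

    // Consumer ops on the data produced into this stage

    pipeline.consumer_release(smem_pipe_read);
    ++smem_pipe_read;
  }
}
```

As in the earlier example, `shared_storage.storage` and `iter` stand for the pipeline's shared-memory storage and a loop trip count set up elsewhere in the kernel.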
# Copyright Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
cutlass/media/docs/pipeline.md/0
{ "file_path": "cutlass/media/docs/pipeline.md", "repo_id": "cutlass", "token_count": 2277 }
49
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import ctypes from cutlass_library import SubstituteTemplate import numpy as np from scipy.special import erf from cutlass_library import DataType, DataTypeTag from cutlass.backend.c_types import MatrixCoord_ from cutlass.backend.frontend import NumpyFrontend from cutlass.backend.library import ActivationOp, ActivationOpTag from cutlass.utils.datatypes import is_numpy_tensor, is_torch_available, is_torch_tensor dtype2ctype = { DataType.f16: ctypes.c_uint16, DataType.f32: ctypes.c_float, DataType.f64: ctypes.c_double, DataType.s8: ctypes.c_int8, DataType.s32: ctypes.c_int32 } if is_torch_available(): import torch import torch.nn.functional as F def get_scalar(value): """ Returns a scalar value from a container (e.g., np.ndarray) """ if is_numpy_tensor(value): if value.size != 1: raise Exception("Scalars used in epilogue must be of size 1") return value.reshape(-1)[0] elif is_torch_tensor(value): if value.size != 1: raise Exception("Scalars used in epilogue must be of size 1") return value.reshape(-1)[0] else: return value def to_ctype_value(value, dtype): """ Converts ``value`` to the corresponding storage needed for the ctype that will store ``value``. 
""" scalar = get_scalar(value) if dtype == DataType.f16: # Convert f16 value into an integer return int.from_bytes(np.float16(scalar).tobytes(), "little") else: return scalar ################################################################################################# # # Epilogue Functors # ################################################################################################# class EpilogueFunctorBase: """ Base class for thread-level epilogue functors """ def __init__(self) -> None: pass def emit(self, tag, template_argument): template = """${tag}<${arguments}>""" arguments = "" for idx, arg in enumerate(template_argument): arguments += arg if idx < len(template_argument) - 1: arguments += ", " values = { "tag": tag, "arguments": arguments, } return SubstituteTemplate(template, values) class LinearCombination(EpilogueFunctorBase): """ Apply a linear combination operator to an array of elements D = alpha * accumulator + beta * source :param element_output: data type used to load and store tensors :param epilogue_vector_length: number of elements computed per operation. Usually it is 128/sizeof_bits_v<ElementOutput_>, but we use 64 and 32 sometimes when there are not enough data to store :param element_accumulator: Accumulator data type :param element_epilogue: data type used to compute linear combination """ tag = "cutlass::epilogue::thread::LinearCombination" def __init__( self, element_output, epilogue_vector_length, element_accumulator=None, element_epilogue=None) -> None: super().__init__() if element_accumulator is None: element_accumulator = element_output if element_epilogue is None: element_epilogue = element_output self.element_output = element_output self.element_accumulator = element_accumulator self.element_epilogue = element_epilogue self.epilogue_vector_length = epilogue_vector_length self.template_arguments = [ DataTypeTag[element_output], str(epilogue_vector_length), DataTypeTag[element_accumulator], DataTypeTag[element_epilogue], ] c_element_epilogue = dtype2ctype[self.element_epilogue] element_epilogue = self.element_epilogue class _EpilogueOutputOpParamsEVT(ctypes.Structure): """ Epilogue params when using the default linear combination of EVT, which does not currently use {alpha,beta}_ptr_array """ _fields_ = [ ("alpha", c_element_epilogue), ("beta", c_element_epilogue), ("alpha_ptr", ctypes.c_void_p), ("beta_ptr", ctypes.c_void_p), ] def __init__(self, alpha, beta, *args) -> None: self.alpha = to_ctype_value(alpha, element_epilogue) self.beta = to_ctype_value(beta, element_epilogue) class _EpilogueOutputOpParams(ctypes.Structure): _fields_ = [ ("alpha", c_element_epilogue), ("beta", c_element_epilogue), ("alpha_ptr", ctypes.c_void_p), ("beta_ptr", ctypes.c_void_p), ("alpha_ptr_array", ctypes.c_void_p), ("beta_ptr_array", ctypes.c_void_p), ] def __init__(self, alpha, beta, *args) -> None: self.alpha = to_ctype_value(alpha, element_epilogue) self.beta = to_ctype_value(beta, element_epilogue) def to_evt_params(self) -> _EpilogueOutputOpParamsEVT: return _EpilogueOutputOpParamsEVT(self.alpha, self.beta) self.epilogue_type = _EpilogueOutputOpParams self.epilogue_type_evt = _EpilogueOutputOpParamsEVT def emit(self): return super().emit(self.tag, self.template_arguments) class LinearCombinationClamp(LinearCombination): """ Applies a linear combination operator to an array of elements then clamps the output before converting to the output element type. 
D = alpha * accumulator + beta * source + uniform :param element_output: data type used to load and store tensors :param epilogue_vector_length: number of elements computed per operation. Usually it is 128/sizeof_bits_v<ElementOutput_>, but we use 64 and 32 sometimes when there are not enough data to store :param element_accumulator: Accumulator data type :param element_epilogue: data type used to compute linear combination """ tag = "cutlass::epilogue::thread::LinearCombinationClamp" def __init__( self, element_output, epilogue_vector_length, element_accumulator=None, element_epilogue=None) -> None: # Base constructor super().__init__( element_output, epilogue_vector_length, element_accumulator, element_epilogue, ) c_element_epilogue = dtype2ctype[self.element_epilogue] element_epilogue = self.element_epilogue class _EpilogueOutputOpParams(ctypes.Structure): _fields_ = [ ("alpha", c_element_epilogue), ("beta", c_element_epilogue), ("alpha_ptr", ctypes.c_void_p), ("beta_ptr", ctypes.c_void_p), ] def __init__(self, alpha, beta, *args) -> None: self.alpha = to_ctype_value(alpha, element_epilogue) self.beta = to_ctype_value(beta, element_epilogue) self.epilogue_type = _EpilogueOutputOpParams class FastLinearCombinationClamp(EpilogueFunctorBase): """ Applies a linear combination operator to an array of elements then clamps the output before converting to the output element type. D = alpha * accumulator + beta * source Note: The below method only when problem_size_K <= 256 for signed int8 gemm or problem_size_K <= 128 for unsigned int8 gemm. The default approach is above. :param element_output: data type used to load and store tensors :param epilogue_vector_length: number of elements computed per operation. Usually it is 128/sizeof_bits_v<ElementOutput_>, but we use 64 and 32 sometimes when there are not enough data to store """ tag = "cutlass::epilogue::thread::FastLinearCombinationClamp" def __init__(self, element_output, epilogue_vector_length, *args) -> None: super().__init__() self.template_arguments = [ DataTypeTag[element_output], str(epilogue_vector_length) ] self.element_accumulator = DataType.s32 self.element_epilogue = DataType.f32 # get epilogue output op c_element_epilogue = dtype2ctype[self.element_epilogue] element_epilogue = self.element_epilogue class _EpilogueOutputOpParams(ctypes.Structure): _fields_ = [ ("alpha", c_element_epilogue), ("beta", c_element_epilogue), ("alpha_ptr", ctypes.c_void_p), ("beta_ptr", ctypes.c_void_p), ] def __init__(self, alpha, beta, *args) -> None: self.alpha = to_ctype_value(alpha, element_epilogue) self.beta = to_ctype_value(beta, element_epilogue) self.epilogue_type = _EpilogueOutputOpParams def emit(self): return super().emit(self.tag, self.template_arguments) class LinearCombinationGeneric(LinearCombination): """ Applies a linear combination operator followed by an activation function to an array of elements. D = activation(alpha * accumulator + beta * source) :param activation_functor: input activation functor :param element_output: data type used to load and store tensors :param epilogue_vector_length: number of elements computed per operation. 
Usually it is 128/sizeof_bits_v<ElementOutput_>, but we use 64 and 32 sometimes when there are not enough data to store :param element_accumulator: Accumulator data type :param element_epilogue: data type used to compute linear combination """ tag = "cutlass::epilogue::thread::LinearCombinationGeneric" def __init__( self, activation_functor, element_output, epilogue_vector_length, element_accumulator=None, element_epilogue=None) -> None: super().__init__( element_output, epilogue_vector_length, element_accumulator, element_epilogue, ) self.template_arguments = [ activation_functor.emit()] + self.template_arguments self.activation_functor = activation_functor self.element_epilogue = element_epilogue # get epilogue output op self.epilogue_type = self.activation_functor.epilogue_output_op(self.element_epilogue) class ActivationFunctor: """ Base class for frequently used activation functions """ @staticmethod def numpy(x: np.ndarray): raise NotImplementedError() @classmethod def emit(cls): return ActivationOpTag[cls.binding_type] @staticmethod def epilogue_output_op(element_epilogue): c_element_epilogue = dtype2ctype[element_epilogue] class _EpilogueOutputOpParams(ctypes.Structure): _fields_ = [ ("alpha", c_element_epilogue), ("beta", c_element_epilogue), ("alpha_ptr", ctypes.c_void_p), ("beta_ptr", ctypes.c_void_p), ] def __init__(self, alpha, beta, *args) -> None: self.alpha = to_ctype_value(alpha, element_epilogue) self.beta = to_ctype_value(beta, element_epilogue) return _EpilogueOutputOpParams class ActivationMeta(type): @classmethod def __call__(cls, x, *args): if is_numpy_tensor(x): return cls.numpy(x, *args) elif is_torch_tensor(x): return cls.torch(x, *args) else: raise NotImplementedError("Unsupported tensor type") @classmethod def numpy(cls, *args): raise NotImplementedError(f"Numpy reference for {cls.__name__[:-4]} is not implemented.") @classmethod def torch(cls, *args): raise NotImplementedError(f"PyTorch reference for {cls.__name__[:-4]} is not implemented.") ############################################################################## # identity operator class identityMeta(ActivationMeta): @classmethod def numpy(cls, x): return x @classmethod def torch(cls, x): return x class identity(ActivationFunctor, metaclass=identityMeta): binding_type = ActivationOp.Identity ############################################################################## # ReLu operator class reluMeta(ActivationMeta): @classmethod def numpy(cls, x): return np.where(x > 0, x, 0) @classmethod def torch(cls, x): return F.relu(x) class relu(ActivationFunctor, metaclass=reluMeta): binding_type = ActivationOp.ReLU ############################################################################## # Leaky ReLu operator class leakyReLUMeta(ActivationMeta): @classmethod def numpy(cls, x, leaky_alpha): return np.maximum(x, 0) + np.minimum(x, 0) * leaky_alpha @classmethod def torch(cls, x, leaky_alpha): return F.leaky_relu(x, leaky_alpha) class leaky_relu(ActivationFunctor, metaclass=leakyReLUMeta): binding_type = ActivationOp.LeakyReLU @staticmethod def epilogue_output_op(element_epilogue): c_element_epilogue = dtype2ctype[element_epilogue] class _EpilogueOutputOpParams(ctypes.Structure): _fields_ = [ ("alpha", c_element_epilogue), ("beta", c_element_epilogue), ("alpha_ptr", ctypes.c_void_p), ("beta_ptr", ctypes.c_void_p), ("leaky_alpha", c_element_epilogue) ] def __init__(self, alpha, beta, leaky_alpha=0.2, *args) -> None: self.alpha = to_ctype_value(alpha, element_epilogue) self.beta = to_ctype_value(beta, 
                                                 element_epilogue)
                self.alpha_ptr = 0
                self.beta_ptr = 0
                self.leaky_alpha = to_ctype_value(leaky_alpha, element_epilogue)

        return _EpilogueOutputOpParams


##############################################################################
# Tanh operator
class tanhMeta(ActivationMeta):
    @classmethod
    def numpy(cls, x):
        return np.tanh(x)

    @classmethod
    def torch(cls, x):
        return torch.tanh(x)


class tanh(ActivationFunctor, metaclass=tanhMeta):
    binding_type = ActivationOp.Tanh


##############################################################################
# Sigmoid operator
class sigmoidMeta(ActivationMeta):
    @classmethod
    def numpy(cls, x):
        return 1.0 / (1.0 + np.exp(-x))

    @classmethod
    def torch(cls, x):
        return F.sigmoid(x)


class sigmoid(ActivationFunctor, metaclass=sigmoidMeta):
    binding_type = ActivationOp.Sigmoid


##############################################################################
# SiLu operator
class siluMeta(ActivationMeta):
    @classmethod
    def numpy(cls, x):
        # SiLU(x) = x * sigmoid(x)
        return x * sigmoidMeta.numpy(x)

    @classmethod
    def torch(cls, x):
        return F.silu(x)


class silu(ActivationFunctor, metaclass=siluMeta):
    binding_type = ActivationOp.SiLU


##############################################################################
# Hardswish operator
class hardswishMeta(ActivationMeta):
    @classmethod
    def numpy(cls, x):
        relu6 = np.minimum(np.maximum(x + 3.0, 0), 6.0)
        return x * relu6 / 6.0

    @classmethod
    def torch(cls, x):
        return F.hardswish(x)


class hardswish(ActivationFunctor, metaclass=hardswishMeta):
    binding_type = ActivationOp.HardSwish


##############################################################################
# GELU operator
class geluMeta(ActivationMeta):
    @classmethod
    def numpy(cls, x):
        return 0.5 * x * (1 + erf(x / np.sqrt(2.0)))

    @classmethod
    def torch(cls, x):
        return F.gelu(x)


class gelu(ActivationFunctor, metaclass=geluMeta):
    binding_type = ActivationOp.Gelu
cutlass/python/cutlass/backend/epilogue.py/0
{ "file_path": "cutlass/python/cutlass/backend/epilogue.py", "repo_id": "cutlass", "token_count": 7063 }
50
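The epilogue.py record above defines the thread-level epilogue functors and the ctypes parameter structs they generate. For orientation, here is a minimal usage sketch, not part of the file, assuming the module resolves as cutlass.backend.epilogue (its on-disk location), that importing cutlass succeeds in the local environment, and that DataType.f32 is covered by the DataTypeTag/dtype2ctype tables the classes reference; the data types and scalar values are arbitrary.

# Illustrative sketch only; see the assumptions stated above.
from cutlass import DataType
from cutlass.backend.epilogue import LinearCombination, leaky_relu

# D = alpha * accumulator + beta * source, computed in f32 with 4-wide vector access.
combo = LinearCombination(element_output=DataType.f32, epilogue_vector_length=4)
print(combo.emit())  # expected to resemble: cutlass::epilogue::thread::LinearCombination<float, 4, float, float>

# The generated ctypes struct packs the host-side scalars consumed at kernel launch.
params = combo.epilogue_type(1.0, 0.0)  # alpha = 1, beta = 0

# Activation functors can extend the parameter struct; leaky ReLU adds leaky_alpha.
leaky_params_t = leaky_relu.epilogue_output_op(DataType.f32)
leaky_params = leaky_params_t(1.0, 0.0, 0.1)  # alpha, beta, leaky_alpha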
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Layout manipulation nodes and implementations The layout Nodes change the layout of intermediate nodes in epilogue visitor graph """ from copy import deepcopy from cutlass_library import LayoutType from pycute import product, flatten import cutlass from cutlass.backend.evt.ir.layout_algorithm import _list_to_tuple, _tuple_to_list from cutlass.backend.evt.ir.node import NodeBase from cutlass.backend.evt.ir.tensor import Tensor class PermutationImpl: """ Detailed implementation and helper functions for permutation """ def __init__(self, node) -> None: assert "indices" in node.kwargs.keys() self.indices = list(node.kwargs["indices"]) self.inverse_indices = self.get_inverse_indices(self.indices) def get_inverse_impl(self): inverse_impl = deepcopy(self) inverse_impl.indices = self.inverse_indices inverse_impl.inverse_indices = self.indices return inverse_impl def update(self, shape): num_dim = len(shape) indices = self.indices num_old_dim = len(indices) # Add offset for i, idx in enumerate(indices): indices[i] = idx + num_dim - num_old_dim # Add broadcast dims for i in range(num_dim - num_old_dim): indices = [i,] + indices self.indices = indices self.inverse_indices = self.get_inverse_indices(self.indices) def get_inverse_indices(self, indices): """ Get the indices for inverse permutation """ num_dim = len(indices) inverse_indices = [0] * num_dim for i in range(num_dim): inverse_indices[indices[i]] = i return inverse_indices def shape_propagation(self, input_node_meta): input_shape = input_node_meta.tensor.shape output_shape = tuple([input_shape[idx] for idx in self.indices]) return output_shape def broadcast(self, shape, node_meta: NodeBase): """ Broadcast the inputs based on current shape """ self.update(shape) inverse_shape = 
tuple([shape[idx] for idx in self.inverse_indices]) node_meta.tensor.broadcast(inverse_shape) def apply_to_user(self, usr_meta: NodeBase): """ Propagate the permutation to the users of the current nodes """ usr_meta.tensor.permute(self.inverse_indices) if hasattr(usr_meta, "store_tensor"): if usr_meta.store_tensor is not None: usr_meta.store_tensor.permute(self.inverse_indices) def apply_to_input(self, input_meta: NodeBase): """ Propagate the permutation to inputs of the current nodes """ input_meta.tensor.permute(self.indices) if hasattr(input_meta, "store_tensor"): if input_meta.store_tensor is not None: input_meta.store_tensor.permute(self.indices) class ReshapeImpl: """ Detailed implementation and helper functions for reshape """ def __init__(self, node) -> None: self.node = node assert "new_shape" in node.kwargs.keys() self.output_shape = _list_to_tuple(node.kwargs["new_shape"]) def get_inverse_impl(self): inverse_impl = deepcopy(self) inverse_impl.output_shape = self.input_shape inverse_impl.input_shape = self.output_shape return inverse_impl def shape_propagation(self, input_node_meta): self.input_shape = input_node_meta.tensor.shape return _list_to_tuple(self.output_shape) def broadcast(self, shape, node_meta: NodeBase): """ Broadcast the inputs based on current shape. """ # Step 1: infer split flatten_split_shape = self.infer_split(flatten(self.input_shape), flatten(self.output_shape)) split_input_shape = self.infer_merge(flatten_split_shape, self.input_shape) split_output_shape = self.infer_merge(flatten_split_shape, self.output_shape) # broadcast shape -> split_output_shape -> flatten_split_shape if len(shape) - len(split_output_shape) > 0: for _ in range(len(shape) - len(split_output_shape)): split_output_shape = [1,] + split_output_shape flatten_split_shape = [1,] + flatten_split_shape split_input_shape = [1,] + split_input_shape broadcast_factor = [] for dim, old_dim in zip(shape, split_output_shape): if not isinstance(dim, list): dim = [dim,] if not isinstance(old_dim, list): old_dim = [old_dim,] if product(tuple(dim)) == product(tuple(old_dim)): broadcast_factor += [1] * len(old_dim) elif product(tuple(old_dim)) == 1: assert len(dim) == 1 broadcast_factor.append(dim[0]) else: raise NotImplementedError(f"Invalid Broadcast: {old_dim} -> {dim}") # flatten_split_shape -> split_input_shape factor_idx = 0 broadcast_split_input_shape = [] for dim in split_input_shape: if isinstance(dim, list): new_dim = [] for d in dim: new_dim.append(d * broadcast_factor[factor_idx]) factor_idx += 1 broadcast_split_input_shape.append(new_dim) else: broadcast_split_input_shape.append(dim * broadcast_factor[factor_idx]) factor_idx += 1 broadcast_split_input_shape = _list_to_tuple(broadcast_split_input_shape) node_meta.tensor.reshape(_list_to_tuple(split_input_shape)) node_meta.tensor.broadcast(broadcast_split_input_shape) # Last reshape op to clean up broadcast_input_shape = tuple([product(dim) for dim in broadcast_split_input_shape]) node_meta.tensor.reshape(broadcast_input_shape) # Update the input shape and output shape self.input_shape = _list_to_tuple(node_meta.tensor.shape) self.output_shape = _list_to_tuple(shape) def apply_to_user(self, user_meta: NodeBase): """ Propagate the reshape to user nodes """ user_meta.tensor.reshape(tuple(self.input_shape)) if hasattr(user_meta, "store_tensor"): if user_meta.store_tensor is not None: user_meta.store_tensor.reshape(tuple(self.input_shape)) def apply_to_input(self, input_meta: NodeBase): """ Propagate the reshape to input nodes """ 
input_meta.tensor.reshape(tuple(self.output_shape)) if hasattr(input_meta, "store_tensor"): if input_meta.store_tensor is not None: input_meta.store_tensor.reshape(tuple(self.output_shape)) # # Helper functions # def infer_split(self, input_shape, output_shape): """ Infer the flatten splitted shape that can be merged to both input_shape and output_shape """ input_shape = _tuple_to_list(input_shape) output_shape = _tuple_to_list(output_shape) if len(input_shape) == 0 and len(output_shape) == 0: return [] if len(input_shape) == 0: if product(tuple(output_shape)) != 1: raise ValueError("Invalid reshape size") else: return output_shape if len(output_shape) == 0: if product(tuple(input_shape)) != 1: raise ValueError("Invalid reshape size") else: return input_shape # This is done recursively by only process the last dimension at each time old_dim = input_shape[-1] new_dim = output_shape[-1] # Exact match if old_dim == new_dim: return self.infer_split(input_shape[:-1], output_shape[:-1]) + [new_dim,] # Needs split if old_dim > new_dim and old_dim % new_dim == 0: residual = old_dim // new_dim return self.infer_split(input_shape[:-1] + [residual,], output_shape[:-1]) + [new_dim,] # Needs merge if old_dim < new_dim and new_dim % old_dim == 0: residual = new_dim // old_dim return self.infer_split(input_shape[:-1], output_shape[:-1] + [residual,]) + [old_dim,] raise NotImplementedError(f"Unsupported split: {input_shape} -> {output_shape}") def infer_merge(self, flatten_shape, shape): flatten_shape = _tuple_to_list(flatten_shape) shape = _tuple_to_list(shape) idx_flat = len(flatten_shape) - 1 merged_shape = [] for dim in reversed(shape): # Exact match if dim == flatten_shape[idx_flat]: merged_shape.append(dim) idx_flat -= 1 # need group elif dim > flatten_shape[idx_flat] and dim % flatten_shape[idx_flat] == 0: residual = dim group = [] while(residual > 1): group.append(flatten_shape[idx_flat]) residual = residual // flatten_shape[idx_flat] idx_flat -= 1 merged_shape.append(group[::-1]) else: raise NotImplementedError(f"Unsupported merge: {flatten_shape} -> {shape}") return merged_shape[::-1] class LayoutNode(NodeBase): """ Layout manipulation nodes """ fn_to_impl = { "permute": PermutationImpl, "reshape": ReshapeImpl } def __init__(self, name: str, fn, kwargs: dict) -> None: super().__init__(name) self.op = "layout" self.fn = fn self.kwargs = kwargs self.underlying_impl = self.fn_to_impl[self.fn.__name__](self) def get_inverse_node(self): inverse_node = deepcopy(self) inverse_node.underlying_impl = self.underlying_impl.get_inverse_impl() return inverse_node def shape_propagation(self, input_node_metas): if self._tensor is not None: return assert len(input_node_metas) == 1, "Layout node can only have one input node" output_shape = self.underlying_impl.shape_propagation(input_node_metas[0]) self._tensor = Tensor( element=self.element_output, shape=output_shape, layout_tag=LayoutType.RowMajor ) return super().shape_propagation(input_node_metas) def type_propagation(self, input_node_metas: 'list[NodeBase]'): """ The store nodes has element_output = element_input """ assert len(input_node_metas) == 1, "Layout node can only have one input node" self.element_output = input_node_metas[0].element_output def broadcast_propagation(self, input_node_metas: 'list[NodeBase]'): """ Propagate the broadcast in the reversed topological order """ if self.tensor is None: raise RuntimeError(f"The tensor of node {self.name} is unknown.") shape = self.tensor.shape for child in input_node_metas: 
self.underlying_impl.broadcast(shape, child) def apply_to_user(self, usr_meta: NodeBase): """ Propagate the permutation to user nodes """ self.underlying_impl.apply_to_user(usr_meta) def apply_to_input(self, input_meta: NodeBase): """ Propagate the permutation to input nodes """ self.underlying_impl.apply_to_input(input_meta)
cutlass/python/cutlass/backend/evt/ir/layout_nodes.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/ir/layout_nodes.py", "repo_id": "cutlass", "token_count": 5691 }
51
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Compute the shared memory size in bytes """ import cutlass_library from pycute import shape_div, product import cutlass from cutlass.backend.evt.ir import TopoVisitorNode, DAGIR from cutlass.backend.library import DataTypeSize class GetSmemSize: """ Get the size in byte of shared memory used by the kernel """ def __init__(self, dag_ir: DAGIR) -> None: self.dag_ir = dag_ir self.cc = self.dag_ir.cc # # Sm90 epilogue specific # def sm90_epilogue_tile(self, tile_description): # Get the epilogue tile size schedule = tile_description.epilogue_schedule if schedule == cutlass_library.EpilogueScheduleType.TmaWarpSpecialized: epilogue_tile_mn = (64, 32) elif schedule == cutlass_library.EpilogueScheduleType.TmaWarpSpecializedCooperative: if tile_description.threadblock_shape[0] >= 128: epilogue_tile_mn = (128, 32) else: epilogue_tile_mn = (64, 32) else: raise NotImplementedError(f"Unsupported schedule: {schedule}") # Get the pipeline stages stages_d = 2 epi_tiles = product(shape_div(tuple(tile_description.threadblock_shape)[:2], epilogue_tile_mn)) if self.dag_ir.has_node("C"): element_c = self.dag_ir.get_node_meta("C").element else: element_c = None element_d = self.dag_ir.get_node_meta("D").element if element_c == element_d: reuse_smem_c = True else: reuse_smem_c = False stages_c = max(epi_tiles, stages_d + 1) if reuse_smem_c else epi_tiles # Record the epilogue tile self.cta_tile_mnk = tuple(tile_description.threadblock_shape) self.epilogue_tile_mn = epilogue_tile_mn self.epi_tiles = epi_tiles self.stages_c = stages_c self.stages_d = stages_d self.reuse_smem_c = reuse_smem_c self.element_c = element_c self.element_d = element_d self.is_source_supported = element_c is not None def sm90_epilogue_smem_size(self, tile_description): """ 
Compute the shared memory size of sm90 collective epilogue """ self.sm90_epilogue_tile(tile_description) # Get the Fusion Storage nodes = self.dag_ir.nodes_topological_order() self.smem_types = {} for node in nodes: meta = self.dag_ir.get_node_meta(node) if not meta.disabled: self.smem_types[node] = meta.underlying_impl.get_smem_size( self.cta_tile_mnk, self.epilogue_tile_mn, self.stages_c, self.stages_d, self.epi_tiles) if node == "D": continue if isinstance(meta, TopoVisitorNode): self.get_dag_smem_type(node) else: self.get_evt_smem_type(node) thread_smem_size = self.smem_types[self.dag_ir.get_all_inputs("D")[0]][0] # Get the Tensor Storage tensors = [] if self.is_source_supported: smem_C = DataTypeSize[self.element_c] * product(self.epilogue_tile_mn) * self.stages_c // 8 tensors.append((smem_C, 128)) else: tensors.append((0, 1)) if self.reuse_smem_c: tensors.append((0, 128)) else: smem_D = DataTypeSize[self.element_d] * product(self.epilogue_tile_mn) * self.stages_d // 8 tensors.append((smem_D, 128)) tensors.append((thread_smem_size, 128)) tensor_smem_size = self.get_struct_size(tensors) # Get pipeline storage size # sizeof(uint64_t * stages_c * 2), alignment of uint64_t # 2 is for FullBarrier and EmptyBarrier pipeline_smem_size = (8 * self.stages_c * 2, 8) # get SharedStorage size smem_size = self.get_struct_size([tensor_smem_size, pipeline_smem_size]) return smem_size[0] def __call__(self, tile_description): return getattr(self, f"sm{self.cc}_epilogue_smem_size")(tile_description) # # Helper functions # @staticmethod def get_visitor_size(members: list, ebo: bool): """ Get the size of struct in bytes """ offset = 0 max_alignment = 1 if len(members) > 0: # Get alignment for _, alignment in members: max_alignment = max(max_alignment, alignment) for type_size, _ in members: if type_size != 0: offset = ((offset + max_alignment - 1) // max_alignment) * max_alignment if type_size == 0 and not ebo: offset += 1 else: offset += type_size offset = ((offset + max_alignment - 1) // max_alignment) * max_alignment return (offset, max_alignment) else: # Struct size is at least 1 return (1, 1) def get_struct_size(self, members: list): """ Get the size of struct in bytes """ return self.get_visitor_size(members, False) def get_evt_smem_type(self, node): # Sort the input nodes by edge weight input_types = [self.smem_types[child] for child in self.dag_ir.get_all_inputs(node)] input_types.append(self.smem_types[node]) if len(input_types) > 1: ebo = len(input_types) > 4 self.smem_types[node] = self.get_visitor_size(input_types, ebo) def get_dag_smem_type(self, node): meta = self.dag_ir.get_node_meta(node) subgraph = meta.subgraph subgraph_nodes = subgraph.nodes_topological_order() # Visit the unvisited nodes in subgraph for n in subgraph_nodes: m = subgraph.get_node_meta(n) if m.disabled: continue else: self.smem_types[n] = m.underlying_impl.get_smem_size( self.cta_tile_mnk, self.epilogue_tile_mn, self.stages_c, self.stages_d, self.epi_tiles) input_types = [self.smem_types[child] for child in subgraph_nodes[:-1]] if len(input_types) > 0: ebo = len(input_types) > 4 self.smem_types[node] = self.get_visitor_size(input_types, ebo)
cutlass/python/cutlass/backend/evt/passes/smem_size_calculator.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/passes/smem_size_calculator.py", "repo_id": "cutlass", "token_count": 3585 }
52
################################################################################################# # # Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utilities for emitting CUTLASS >= 3 convolution kernels """ import enum import os.path import shutil import logging from string import Template try: import builtins if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True: raise ImportError("Disabling attempt to import cutlass_library") from cutlass_library.library import * except ImportError: from library import * _LOGGER = logging.getLogger(__name__) ################################################################################################### # # Emits single instances of a CUTLASS device-wide operator # ################################################################################################### class EmitConv3xInstance: def __init__(self): _LOGGER.debug("*** EmitConv3xInstance::__init__") # Define epilogue type first, so that the mainloop type # can use it with StageCountAutoCarveout. 
self.template = """ // CUTLASS >= 3 convolution ${conv_kind_name} kernel instance "${operation_name}" using ${operation_name}_epilogue = typename cutlass::epilogue::collective::CollectiveBuilder< ${arch}, ${opcode_class_epi}, ${tile_shape}, // tile shape ${cluster_shape}, // cluster shape ${epi_tile_mn}, ${element_accumulator}, ${element_compute}, ${element_c}, ${layout_c}, 128 / cute::sizeof_bits_v<${element_c}>, ${element_d}, ${layout_d}, 128 / cute::sizeof_bits_v<${element_d}>, ${epilogue_schedule} // , class FusionOpOrCallbacks = cutlass::epilogue::fusion::LinearCombination<ElementD,ElementCompute> >::CollectiveOp; using ${operation_name}_mainloop = typename cutlass::conv::collective::CollectiveBuilder< ${arch}, ${opcode_class_main}, ${conv_kind}, // kFprop, kDgrad, or kWgrad ${element_a}, ${layout_a}, 128 / cute::sizeof_bits_v<${element_a}>, ${element_b}, ${layout_b}, 128 / cute::sizeof_bits_v<${element_b}>, ${element_accumulator}, ${tile_shape}, // tile shape ${cluster_shape}, // cluster shape ${stages}, ${kernel_schedule} >::CollectiveOp; // Unit tests call this "ConvKernel". // Conv operator ${operation_name} using ${operation_name}_base = cutlass::conv::kernel::ConvUniversal< ${operation_name}_mainloop, ${operation_name}_epilogue, ${tile_scheduler} >; """ def arch_number_to_type(self, arch: int) -> str: return f"cutlass::arch::Sm{arch}" def tile_shape(self, operation) -> str: # For all three kinds of convolutions, the tile shape's K mode # differs from GEMM in that needs to be wrapped in a Shape. # For Wgrad convolutions specifically, # the N tile shape also needs to be wrapped in a Shape. m_template = 'cute::_${tile_shape_m}' if operation.conv_kind == ConvKind.Wgrad: n_template = 'cute::Shape<cute::_${tile_shape_n}>' else: n_template = 'cute::_${tile_shape_n}' k_template = 'cute::Shape<cute::_${tile_shape_k}>' tile_shape_template = f'cute::Shape<{m_template}, {n_template}, {k_template}>' values = { 'tile_shape_m': operation.tile_description.tile_shape[0], 'tile_shape_n': operation.tile_description.tile_shape[1], 'tile_shape_k': operation.tile_description.tile_shape[2] } return Template(tile_shape_template).substitute(values) def cluster_shape(self, operation) -> str: m_template = 'cute::_${cluster_shape_m}' n_template = 'cute::_${cluster_shape_n}' k_template = 'cute::_${cluster_shape_k}' cluster_shape_template = f'cute::Shape<{m_template}, {n_template}, {k_template}>' values = { 'cluster_shape_m': operation.tile_description.cluster_shape[0], 'cluster_shape_n': operation.tile_description.cluster_shape[1], 'cluster_shape_k': operation.tile_description.cluster_shape[2], } return Template(cluster_shape_template).substitute(values) def stage_count(self, operation) -> str: # stages == 0 tells builder to pick the number of stages automatically namespace_prefix = 'cutlass::conv::collective::' if operation.tile_description.stages > 0: return f"{namespace_prefix}StageCount<{str(operation.tile_description.stages)}>" else: return f"{namespace_prefix}StageCountAutoCarveout<sizeof(typename {operation.procedural_name()}_epilogue::SharedStorage)>" def emit(self, operation) -> str: _LOGGER.debug("*** EmitConv3xInstance::emit") _LOGGER.debug("*** operation: procedural_name()=" + operation.procedural_name()) # Identify the operation as CUTLASS 3 by its is_3x field if (not hasattr(operation, 'is_3x')) or (not operation.is_3x): raise RuntimeError("operation must be a CUTLASS 3 operation") epi_tile_mn = "cutlass::epilogue::collective::EpilogueTileAuto" opcode_class_main = 
OpcodeClassTag[operation.tile_description.math_instruction.opcode_class] opcode_class_epi = opcode_class_main tile_shape = operation.tile_description.tile_shape warp_count = operation.tile_description.warp_count epilogue_schedule = EpilogueScheduleTag[operation.epilogue_schedule] # KernelScheduleTag and TileSchedulerTag both hard-code the # namespace qualification of KernelScheduleAuto as # "cutlass::gemm::collective::" (unless the tag is 'void'). # # For TileSchedulerTag, this namespace is fine, since CUTLASS 3 # convolutions use the same tile schedulers (from the same # cutlass::gemm::collective namespace) as GEMMs. kernel_schedule = KernelScheduleTag[operation.kernel_schedule].replace('gemm::', 'conv::') tile_scheduler = TileSchedulerTag[operation.tile_scheduler] opcode_class = OpcodeClassTag[operation.tile_description.math_instruction.opcode_class] values = { 'operation_name': operation.procedural_name(), 'conv_kind': ConvKindTag[operation.conv_kind], 'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'align_a': int(operation.A.alignment), 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'align_b': int(operation.B.alignment), 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'align_c': int(operation.C.alignment), 'element_d': DataTypeTag[operation.D.element], 'layout_d': LayoutTag[operation.D.layout], 'align_d': int(operation.D.alignment), 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': opcode_class, 'arch': self.arch_number_to_type(operation.arch), 'tile_shape': self.tile_shape(operation), 'cluster_shape': self.cluster_shape(operation), 'opcode_class_epi': opcode_class_epi, 'opcode_class_main': opcode_class_main, 'epi_tile_mn': epi_tile_mn, 'stages': self.stage_count(operation), 'kernel_schedule': kernel_schedule, 'epilogue_schedule': epilogue_schedule, 'tile_scheduler': tile_scheduler, 'element_compute': DataTypeTag[operation.element_compute] } return Template(self.template).substitute(values) class EmitConv3xIncludes: def __init__(self): _LOGGER.debug("*** EmitConv3xIncludes::__init__") self.includes = ['conv_operation_3x.hpp', 'cutlass/conv/device/conv_universal_adapter.hpp', 'cutlass/conv/kernel/conv_universal.hpp', 'cutlass/conv/collective/collective_builder.hpp', 'cutlass/epilogue/collective/collective_builder.hpp'] def emit(self, operation) -> str: _LOGGER.debug("*** EmitConv3xIncludes::emit") return '\n'.join(f"#include \"{incl}\"" for incl in self.includes) + \ "\n\n///////////////////////////////////////////////////////////////////////////////////////////////////"
cutlass/python/cutlass_library/conv3x_emitter.py/0
{ "file_path": "cutlass/python/cutlass_library/conv3x_emitter.py", "repo_id": "cutlass", "token_count": 3573 }
53
/* * basic.css * ~~~~~~~~~ * * Sphinx stylesheet -- basic theme. * * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ /* -- main layout ----------------------------------------------------------- */ div.clearer { clear: both; } div.section::after { display: block; content: ''; clear: left; } /* -- relbar ---------------------------------------------------------------- */ div.related { width: 100%; font-size: 90%; } div.related h3 { display: none; } div.related ul { margin: 0; padding: 0 0 0 10px; list-style: none; } div.related li { display: inline; } div.related li.right { float: right; margin-right: 5px; } /* -- sidebar --------------------------------------------------------------- */ div.sphinxsidebarwrapper { padding: 10px 5px 0 10px; } div.sphinxsidebar { float: left; width: 230px; margin-left: -100%; font-size: 90%; word-wrap: break-word; overflow-wrap : break-word; } div.sphinxsidebar ul { list-style: none; } div.sphinxsidebar ul ul, div.sphinxsidebar ul.want-points { margin-left: 20px; list-style: square; } div.sphinxsidebar ul ul { margin-top: 0; margin-bottom: 0; } div.sphinxsidebar form { margin-top: 10px; } div.sphinxsidebar input { border: 1px solid #98dbcc; font-family: sans-serif; font-size: 1em; } div.sphinxsidebar #searchbox form.search { overflow: hidden; } div.sphinxsidebar #searchbox input[type="text"] { float: left; width: 80%; padding: 0.25em; box-sizing: border-box; } div.sphinxsidebar #searchbox input[type="submit"] { float: left; width: 20%; border-left: none; padding: 0.25em; box-sizing: border-box; } img { border: 0; max-width: 100%; } /* -- search page ----------------------------------------------------------- */ ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li p.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } /* -- index page ------------------------------------------------------------ */ table.contentstable { width: 90%; margin-left: auto; margin-right: auto; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } /* -- general index --------------------------------------------------------- */ table.indextable { width: 100%; } table.indextable td { text-align: left; vertical-align: top; } table.indextable ul { margin-top: 0; margin-bottom: 0; list-style-type: none; } table.indextable > tbody > tr > td > ul { padding-left: 0em; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } div.modindex-jumpbox { border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 1em 0 1em 0; padding: 0.4em; } div.genindex-jumpbox { border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 1em 0 1em 0; padding: 0.4em; } /* -- domain module index --------------------------------------------------- */ table.modindextable td { padding: 2px; border-collapse: collapse; } /* -- general body styles --------------------------------------------------- */ div.body { min-width: 360px; max-width: 800px; } div.body p, div.body dd, div.body li, div.body blockquote { -moz-hyphens: auto; -ms-hyphens: auto; -webkit-hyphens: auto; 
hyphens: auto; } a.headerlink { visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink, caption:hover > a.headerlink, p.caption:hover > a.headerlink, div.code-block-caption:hover > a.headerlink { visibility: visible; } div.body p.caption { text-align: inherit; } div.body td { text-align: left; } .first { margin-top: 0 !important; } p.rubric { margin-top: 30px; font-weight: bold; } img.align-left, figure.align-left, .figure.align-left, object.align-left { clear: left; float: left; margin-right: 1em; } img.align-right, figure.align-right, .figure.align-right, object.align-right { clear: right; float: right; margin-left: 1em; } img.align-center, figure.align-center, .figure.align-center, object.align-center { display: block; margin-left: auto; margin-right: auto; } img.align-default, figure.align-default, .figure.align-default { display: block; margin-left: auto; margin-right: auto; } .align-left { text-align: left; } .align-center { text-align: center; } .align-default { text-align: center; } .align-right { text-align: right; } /* -- sidebars -------------------------------------------------------------- */ div.sidebar, aside.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; padding: 7px; background-color: #ffe; width: 40%; float: right; clear: right; overflow-x: auto; } p.sidebar-title { font-weight: bold; } nav.contents, aside.topic, div.admonition, div.topic, blockquote { clear: left; } /* -- topics ---------------------------------------------------------------- */ nav.contents, aside.topic, div.topic { border: 1px solid #ccc; padding: 7px; margin: 10px 0 10px 0; } p.topic-title { font-size: 1.1em; font-weight: bold; margin-top: 10px; } /* -- admonitions ----------------------------------------------------------- */ div.admonition { margin-top: 10px; margin-bottom: 10px; padding: 7px; } div.admonition dt { font-weight: bold; } p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; } div.body p.centered { text-align: center; margin-top: 25px; } /* -- content of sidebars/topics/admonitions -------------------------------- */ div.sidebar > :last-child, aside.sidebar > :last-child, nav.contents > :last-child, aside.topic > :last-child, div.topic > :last-child, div.admonition > :last-child { margin-bottom: 0; } div.sidebar::after, aside.sidebar::after, nav.contents::after, aside.topic::after, div.topic::after, div.admonition::after, blockquote::after { display: block; content: ''; clear: both; } /* -- tables ---------------------------------------------------------------- */ table.docutils { margin-top: 10px; margin-bottom: 10px; border: 0; border-collapse: collapse; } table.align-center { margin-left: auto; margin-right: auto; } table.align-default { margin-left: auto; margin-right: auto; } table caption span.caption-number { font-style: italic; } table caption span.caption-text { } table.docutils td, table.docutils th { padding: 1px 8px 1px 5px; border-top: 0; border-left: 0; border-right: 0; border-bottom: 1px solid #aaa; } th { text-align: left; padding-right: 5px; } table.citation { border-left: solid 1px gray; margin-left: 1px; } table.citation td { border-bottom: none; } th > :first-child, td > :first-child { margin-top: 0px; } th > :last-child, td > :last-child { margin-bottom: 0px; } /* -- figures --------------------------------------------------------------- */ div.figure, figure { margin: 0.5em; padding: 0.5em; } div.figure 
p.caption, figcaption { padding: 0.3em; } div.figure p.caption span.caption-number, figcaption span.caption-number { font-style: italic; } div.figure p.caption span.caption-text, figcaption span.caption-text { } /* -- field list styles ----------------------------------------------------- */ table.field-list td, table.field-list th { border: 0 !important; } .field-list ul { margin: 0; padding-left: 1em; } .field-list p { margin: 0; } .field-name { -moz-hyphens: manual; -ms-hyphens: manual; -webkit-hyphens: manual; hyphens: manual; } /* -- hlist styles ---------------------------------------------------------- */ table.hlist { margin: 1em 0; } table.hlist td { vertical-align: top; } /* -- object description styles --------------------------------------------- */ .sig { font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; } .sig-name, code.descname { background-color: transparent; font-weight: bold; } .sig-name { font-size: 1.1em; } code.descname { font-size: 1.2em; } .sig-prename, code.descclassname { background-color: transparent; } .optional { font-size: 1.3em; } .sig-paren { font-size: larger; } .sig-param.n { font-style: italic; } /* C++ specific styling */ .sig-inline.c-texpr, .sig-inline.cpp-texpr { font-family: unset; } .sig.c .k, .sig.c .kt, .sig.cpp .k, .sig.cpp .kt { color: #0033B3; } .sig.c .m, .sig.cpp .m { color: #1750EB; } .sig.c .s, .sig.c .sc, .sig.cpp .s, .sig.cpp .sc { color: #067D17; } /* -- other body styles ----------------------------------------------------- */ ol.arabic { list-style: decimal; } ol.loweralpha { list-style: lower-alpha; } ol.upperalpha { list-style: upper-alpha; } ol.lowerroman { list-style: lower-roman; } ol.upperroman { list-style: upper-roman; } :not(li) > ol > li:first-child > :first-child, :not(li) > ul > li:first-child > :first-child { margin-top: 0px; } :not(li) > ol > li:last-child > :last-child, :not(li) > ul > li:last-child > :last-child { margin-bottom: 0px; } ol.simple ol p, ol.simple ul p, ul.simple ol p, ul.simple ul p { margin-top: 0; } ol.simple > li:not(:first-child) > p, ul.simple > li:not(:first-child) > p { margin-top: 0; } ol.simple p, ul.simple p { margin-bottom: 0; } aside.footnote > span, div.citation > span { float: left; } aside.footnote > span:last-of-type, div.citation > span:last-of-type { padding-right: 0.5em; } aside.footnote > p { margin-left: 2em; } div.citation > p { margin-left: 4em; } aside.footnote > p:last-of-type, div.citation > p:last-of-type { margin-bottom: 0em; } aside.footnote > p:last-of-type:after, div.citation > p:last-of-type:after { content: ""; clear: both; } dl.field-list { display: grid; grid-template-columns: fit-content(30%) auto; } dl.field-list > dt { font-weight: bold; word-break: break-word; padding-left: 0.5em; padding-right: 5px; } dl.field-list > dd { padding-left: 0.5em; margin-top: 0em; margin-left: 0em; margin-bottom: 0em; } dl { margin-bottom: 15px; } dd > :first-child { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } dl > dd:last-child, dl > dd:last-child > :last-child { margin-bottom: 0; } dt:target, span.highlighted { background-color: #fbe54e; } rect.highlighted { fill: #fbe54e; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } .versionmodified { font-style: italic; } .system-message { background-color: #fda; padding: 5px; border: 3px solid red; } .footnote:target { background-color: #ffa; } .line-block { display: block; margin-top: 1em; margin-bottom: 1em; } 
.line-block .line-block { margin-top: 0; margin-bottom: 0; margin-left: 1.5em; } .guilabel, .menuselection { font-family: sans-serif; } .accelerator { text-decoration: underline; } .classifier { font-style: oblique; } .classifier:before { font-style: normal; margin: 0 0.5em; content: ":"; display: inline-block; } abbr, acronym { border-bottom: dotted 1px; cursor: help; } /* -- code displays --------------------------------------------------------- */ pre { overflow: auto; overflow-y: hidden; /* fixes display issues on Chrome browsers */ } pre, div[class*="highlight-"] { clear: both; } span.pre { -moz-hyphens: none; -ms-hyphens: none; -webkit-hyphens: none; hyphens: none; white-space: nowrap; } div[class*="highlight-"] { margin: 1em 0; } td.linenos pre { border: 0; background-color: transparent; color: #aaa; } table.highlighttable { display: block; } table.highlighttable tbody { display: block; } table.highlighttable tr { display: flex; } table.highlighttable td { margin: 0; padding: 0; } table.highlighttable td.linenos { padding-right: 0.5em; } table.highlighttable td.code { flex: 1; overflow: hidden; } .highlight .hll { display: block; } div.highlight pre, table.highlighttable pre { margin: 0; } div.code-block-caption + div { margin-top: 0; } div.code-block-caption { margin-top: 1em; padding: 2px 5px; font-size: small; } div.code-block-caption code { background-color: transparent; } table.highlighttable td.linenos, span.linenos, div.highlight span.gp { /* gp: Generic.Prompt */ user-select: none; -webkit-user-select: text; /* Safari fallback only */ -webkit-user-select: none; /* Chrome/Safari */ -moz-user-select: none; /* Firefox */ -ms-user-select: none; /* IE10+ */ } div.code-block-caption span.caption-number { padding: 0.1em 0.3em; font-style: italic; } div.code-block-caption span.caption-text { } div.literal-block-wrapper { margin: 1em 0; } code.xref, a code { background-color: transparent; font-weight: bold; } h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { background-color: transparent; } .viewcode-link { float: right; } .viewcode-back { float: right; font-family: sans-serif; } div.viewcode-block:target { margin: -1px -10px; padding: 0 10px; } /* -- math display ---------------------------------------------------------- */ img.math { vertical-align: middle; } div.body div.math p { text-align: center; } span.eqno { float: right; } span.eqno a.headerlink { position: absolute; z-index: 1; } div.math:hover a.headerlink { visibility: visible; } /* -- printout stylesheet --------------------------------------------------- */ @media print { div.document, div.documentwrapper, div.bodywrapper { margin: 0 !important; width: 100%; } div.sphinxsidebar, div.related, div.footer, #top-link { display: none; } }
cutlass/python/docs/_static/basic.css/0
{ "file_path": "cutlass/python/docs/_static/basic.css", "repo_id": "cutlass", "token_count": 6093 }
54
Operations ========== GEMM ---- .. automodule:: cutlass.op.gemm :members: :undoc-members: :show-inheritance: Grouped GEMM ------------ .. automodule:: cutlass.op.gemm_grouped :members: :undoc-members: :show-inheritance: Operation --------- .. automodule:: cutlass.op.op :members: :undoc-members: :show-inheritance:
cutlass/python/docs_src/source/cutlass.op.rst/0
{ "file_path": "cutlass/python/docs_src/source/cutlass.op.rst", "repo_id": "cutlass", "token_count": 146 }
55
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ High-level tests for running batched GEMMs """ from functools import partial import logging from math import prod import unittest import cutlass from cutlass.backend.utils.device import device_cc import torch from utils import LayoutCombination cutlass.set_log_level(logging.WARNING) torch.manual_seed(2023) def pytorch_reference(A, B, C, alpha, beta): # Get the batch count. Assume that any of A, B, and C # with a batch dimension ahve matching batch count. Thus, # we break out of the loop once we have found the first # tensor containing a batch dimension. batch_count = (1,) for tensor in [A, B, C]: if len(tensor.shape) > 2: batch_count = tensor.shape[:-2] break int_batch_count = prod(batch_count) def add_batch(tensor): if len(tensor.shape) == 2: return tensor.unsqueeze(0).repeat(int_batch_count, 1, 1) else: return tensor.reshape(-1, tensor.size(-2), tensor.size(-1)) # Reshape tensors to have batch dimension A = add_batch(A) B = add_batch(B) C = add_batch(C) ret = (torch.bmm(A, B) * alpha) + (C * beta) reshape_vals = batch_count + C.shape[-2:] return ret.reshape(*reshape_vals) def initialize(rows, cols, batch): tensor = torch.randint(-3, 3, size=(rows*cols*prod(batch),), device='cuda').half() if len(batch) > 0 and prod(batch) > 1: reshape_vals = batch + (rows, cols) return tensor.reshape(*reshape_vals) else: return tensor.reshape(rows, cols) class GemmF16Batched(unittest.TestCase): def run_batched(self, batch_count: tuple, batch_A: bool, batch_B: bool, batch_C: bool): M = 512 N = 256 K = 128 alpha = 1. beta = 2. 
A = initialize(M, K, batch_count if batch_A else (1,)) B = initialize(K, N, batch_count if batch_B else (1,)) C = initialize(M, N, batch_count if batch_C else (1,)) D = initialize(M, N, batch_count) plan = cutlass.op.Gemm(A=A, B=B, C=C, D=D, element_accumulator=cutlass.DataType.f32) plan.run(A, B, C, D, alpha, beta) reference = pytorch_reference(A, B, C, alpha, beta) assert reference.equal(D) def test_batched_ABC(self): self.run_batched((3,), True, True, True) self.run_batched((2, 3), True, True, True) def test_batched_AB(self): self.run_batched((3,), True, True, False) self.run_batched((2, 3), True, True, False) def test_batched_AC(self): self.run_batched((3,), True, False, True) self.run_batched((2, 3), True, False, True) def test_batched_BC(self): self.run_batched((3,), False, True, True) self.run_batched((2, 3), False, True, True) def test_batched_A(self): self.run_batched((3,), True, False, False) self.run_batched((2, 3), True, False, False) def test_batched_B(self): self.run_batched((3,), False, True, False) self.run_batched((2, 3), False, True, False) if __name__ == '__main__': unittest.main()
cutlass/test/python/cutlass/gemm/gemm_batched.py/0
{ "file_path": "cutlass/test/python/cutlass/gemm/gemm_batched.py", "repo_id": "cutlass", "token_count": 1824 }
56