Dataset columns: text (string, 27 to 947k chars), id (string, 18 to 126 chars), metadata (dict), __index_level_0__ (int64, 0 to 80)
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Additional permutation information for the example. */ #include "cutlass/layout/permute.h" #include "cutlass/gemm/gemm.h" namespace example { using namespace cute; // This struct is specialized below for different CUTLASS 2.x permutation ops // to describe the operation in terms of target CuTe shape and stride order. template<class Permute> struct PermuteTraits {}; // Use X as a placeholder for shape division result using X = Underscore; // Reshape a rank-2 shape into a multidimensional shape. // Input: // shape = (A, B, ...) // target_shape = ((A1, ..., X, ..., Am), (B1, ..., X, ..., Bn), ...) // Output: // ((A1, ..., A/prod(A1..Am), ..., Am), (B1, ..., B/prod(B1..Bn), ..., Bn), ...) 
template<class Shape, class TargetShape> constexpr auto reshape(Shape const& shape, TargetShape const& target_shape) { if constexpr (is_tuple<Shape>::value) { return cute::transform(shape, target_shape, [](auto && s, auto && t){ return reshape(s, t); }); } else { auto idx = find_if(target_shape, [](auto x){ return is_underscore<decltype(x)>{}; }); constexpr int I = decltype(idx)::value; static_assert(I < tuple_size_v<TargetShape>, "Each mode of TargetShape must contain a placeholder X"); auto divisors = remove<I>(target_shape); assert(shape % product(divisors) == 0); return replace<I>(target_shape, shape / product(divisors)); } } // Given a tensor layout, compute a permutation layout consisting of: // - sub-modes corresponding to the implied multidimensional shape of the source tensor // - strides accounting for the permutation operation being performed template<class Permute, bool Transpose, class Shape, class Stride> constexpr auto make_permute_layout(Layout<Shape,Stride> const& layout) { static_assert(cute::rank(Shape{}) == 3, "Only rank-3 layouts are supported"); if constexpr (Transpose) { // Deal with tensor B by transposing appropriately before and after computing the permute layout. // Its CuTe-canonical mode order is [N,K,L], while permute operations expect [row,col,batch]. return select<1,0,2>(make_permute_layout<Permute, false>(select<1,0,2>(layout))); } else { if constexpr (cutlass::layout::is_trivial_permute<Permute>) { // Special case for NoPermute. Use a depth-2 layout for consistency with other permutations. using ShapeProfile = tuple<tuple<X>, tuple<X>, tuple<X>>; return unflatten(layout, ShapeProfile{}); } else { // Here's where the permutation layout is actually built using ShapeProfile = typename PermuteTraits<Permute>::ShapeProfile; using StrideOrder = typename PermuteTraits<Permute>::StrideOrder; return make_ordered_layout(reshape(layout.shape(), ShapeProfile{}), StrideOrder{}); } } } namespace detail { template<int I> struct is_constant_pred { template <class T> constexpr auto operator()(T) { return is_constant<I, T>{}; } }; template<class Permutation, int... I> constexpr auto inverse_impl(Permutation const & perm, seq<I...>) { return cute::make_tuple(Int<find_if(Permutation{}, is_constant_pred<I>{})>{}...); } } // namespace detail // Compute an inverse of a permutation represented as a tuple of cute::Int<> template<class Permutation> constexpr auto inverse(Permutation const & perm) { auto flat_perm = flatten(perm); return unflatten(detail::inverse_impl(flat_perm, tuple_seq<decltype(flat_perm)>{}), perm); } template<class T> using inverse_t = decltype(inverse(T{})); // Given a rank-2 layout of tensor that is assumed to have been permuted, // compute the original rank-2 layout of the tensor prior to the permutation. // This is needed to form the correct input to the standalone permutation kernel. template<class Permute, bool Transpose, class Shape, class Stride> constexpr auto make_original_layout(Layout<Shape,Stride> const& layout) { static_assert(cute::rank(Shape{}) == 3, "Only rank-3 layouts are supported"); if constexpr (Transpose) { // Deal with tensor B by transposing appropriately before and after computing the permute layout. // Its CuTe-canonical mode order is [N,K,L], while permute operations expect [row,col,batch]. 
return select<1,0,2>(make_original_layout<Permute, false>(select<1,0,2>(layout))); } else { using ShapeProfile = typename PermuteTraits<Permute>::ShapeProfile; using IndexOrder = typename PermuteTraits<Permute>::IndexOrder; using OrigOrder = conditional_t<cutlass::gemm::detail::is_major<0,Stride>(), seq<0,1,2>, seq<1,0,2>>; auto orig_shape = select(flatten(reshape(layout.shape(), ShapeProfile{})), IndexOrder{}); // print("Permuted shape: "); print(reshape(layout.shape(), ShapeProfile{})); print("\n"); // print("Original shape: "); print(orig_shape); print("\n"); return make_ordered_layout(product_each(orig_shape), OrigOrder{}); } } /////////////// Tensor4DPermute0213 //////////////////// template<int D1, int D2> struct PermuteTraits<cutlass::layout::Tensor4DPermute0213ColumnMajor<D1, D2>> { static constexpr bool kBatched = false; using ShapeProfile = Shape<Shape<X,Int<D1>>, Shape<Int<D2>,X>, Shape<X>>; using IndexOrder = Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>; using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>; }; template<int D1, int D2> struct PermuteTraits<cutlass::layout::Tensor4DPermute0213ColumnMajorInverse<D1, D2>> { static constexpr bool kBatched = false; using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<Int<D1>,X>, Shape<X>>; using IndexOrder = Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>; using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>; }; template<int D1, int D2> struct PermuteTraits<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> { static constexpr bool kBatched = false; using ShapeProfile = Shape<Shape<Int<D1>,X>, Shape<X,Int<D2>>, Shape<X>>; using IndexOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>; using StrideOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>; }; template<int D1, int D2> struct PermuteTraits<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>> { static constexpr bool kBatched = false; using ShapeProfile = Shape<Shape<Int<D2>,X>, Shape<X,Int<D1>>, Shape<X>>; using IndexOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>; using StrideOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>; }; /////////////// Tensor4DPermuteBMM0321 //////////////////// template<int D> struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D>> { static constexpr bool kBatched = true; using ShapeProfile = Shape<Shape<X>, Shape<X>, Shape<Int<D>,X>>; using IndexOrder = Step<Step<_0,_2>, Step<_1>, Step<_3>>; using StrideOrder = Step<Step<_0>, Step<_2>, Step<_1,_3>>; }; template<int D> struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajorInverse<D>> { static constexpr bool kBatched = true; using ShapeProfile = Shape<Shape<X,Int<D>>, Shape<X>, Shape<X>>; using IndexOrder = Step<Step<_0>, Step<_2>, Step<_1,_3>>; using StrideOrder = Step<Step<_0,_2>, Step<_1>, Step<_3>>; }; /////////////// Tensor4DPermuteBMM0213 //////////////////// template<int D> struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D>> { static constexpr bool kBatched = true; using ShapeProfile = Shape<Shape<X>, Shape<X>, Shape<Int<D>,X>>; using IndexOrder = Step<Step<_0>, Step<_1,_2>, Step<_3>>; using StrideOrder = Step<Step<_2>, Step<_0>, Step<_1,_3>>; }; template<int D> struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0213RowMajorInverse<D>> { static constexpr bool kBatched = true; using ShapeProfile = Shape<Shape<X>, Shape<X,Int<D>>, Shape<X>>; using IndexOrder = Step<Step<_0>, Step<_1>, Step<_2,_3>>; using StrideOrder = Step<Step<_1>, Step<_0,_2>, Step<_3>>; }; /////////////// 
Tensor5DPermute02413 //////////////////// template<int D1, int D2, int D3> struct PermuteTraits<cutlass::layout::Tensor5DPermute02413ColumnMajor<D1, D2, D3>> { static constexpr bool kBatched = false; using ShapeProfile = Shape<Shape<X,Int<D1>>, Shape<Int<D2>,Int<D3>,X>, Shape<X>>; using IndexOrder = Step<Step<_0,_2>, Step<_4,_1,_3>, Step<_5>>; using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_3>, Step<_1,_4,_2>, Step<_5>>; }; template<int D1, int D2, int D3> struct PermuteTraits<cutlass::layout::Tensor5DPermute02413ColumnMajorInverse<D1, D2, D3>> { static constexpr bool kBatched = false; using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<X,Int<D1>,Int<D3>>, Shape<X>>; using IndexOrder = Step<Step<_0,_3>, Step<_1,_4,_2>, Step<_5>>; using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_4,_1,_3>, Step<_5>>; }; /////////////// Tensor5DPermute20314 //////////////////// template<int D1, int D2, int D3> struct PermuteTraits<cutlass::layout::Tensor5DPermute20314RowMajor<D1, D2, D3>> { static constexpr bool kBatched = false; using ShapeProfile = Shape<Shape<Int<D1>,X>, Shape<X,Int<D3>,Int<D2>>, Shape<X>>; using IndexOrder = Step<Step<_2,_0>, Step<_3,_1,_4>, Step<_5>>; using StrideOrder = Step<Step<_1,_3>, Step<_0,_2,_4>, Step<_5>>; }; template<int D1, int D2, int D3> struct PermuteTraits<cutlass::layout::Tensor5DPermute20314RowMajorInverse<D1, D2, D3>> { static constexpr bool kBatched = false; using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<X,Int<D1>,Int<D3>>, Shape<X>>; using IndexOrder = Step<Step<_3,_0>, Step<_2,_4,_1>, Step<_5>>; using StrideOrder = Step<Step<_4,_2>, Step<_0,_3,_1>, Step<_5>>; }; } // namespace example
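The `inverse()` helper above computes, at compile time, the inverse of a permutation encoded as a tuple of `cute::Int<>`. A plain run-time C++ analogue may make the semantics easier to see; this is a minimal sketch for illustration only (the array-based `inverse_permutation` below is not part of CUTLASS):

```cpp
#include <array>
#include <cstddef>

// For each value i, record the position at which i occurs in perm.
// This mirrors detail::inverse_impl, which looks up find_if(perm, is_constant_pred<I>) for each I.
template <std::size_t N>
constexpr std::array<int, N> inverse_permutation(std::array<int, N> const& perm) {
  std::array<int, N> inv{};
  for (int i = 0; i < static_cast<int>(N); ++i) {
    inv[perm[i]] = i;  // perm maps i -> perm[i], so the inverse maps perm[i] -> i
  }
  return inv;
}

// Example: the flattened (0,2,1,3,4) index order used by Tensor4DPermute0213 is self-inverse,
// which is why its StrideOrder (inverse_t<IndexOrder>) equals its IndexOrder:
//   inverse_permutation({0, 2, 1, 3, 4}) == {0, 2, 1, 3, 4}
```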
cutlass/examples/53_hopper_gemm_permute/permute_traits.hpp/0
{ "file_path": "cutlass/examples/53_hopper_gemm_permute/permute_traits.hpp", "repo_id": "cutlass", "token_count": 4116 }
15
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cute/tensor.hpp" #include "cute/atom/mma_atom.hpp" #include "cute/atom/copy_atom.hpp" #include <random> #include "cutlass/util/print_error.hpp" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/collective/collective_mma.hpp" using namespace cute; struct AmpereUnpredicatedFprop { // // Static config for conv problem shape // using D = _6; using H = _4; using W = _4; using T = _3; using R = _3; using S = _3; using Z = _4; using P = _2; using Q = _2; using C = _64; using K = _128; // Tiler config using Tiler_K = decltype(cute::min(K{}, _128{})); using Tiler_C = decltype(cute::min(C{}, _32{})); using Tiler_N = _4; using TileM = Tiler_K; using TileN = Shape<Tiler_N, Z, P, Q>; using TileK = Shape<Tiler_C,_1,_1,_1>; using PIPE = _3; using TilerFlt = Shape<TileM, TileK>; using TilerAct = Shape<TileN, TileK>; using TilerOut = Shape<TileM, TileN>; using TileSizeM = Int<size(TileM{})>; using TileSizeN = Int<size(TileN{})>; using TileSizeK = Int<size(TileK{})>; static constexpr int Stages = PIPE::value; using ElementFlt = tfloat32_t; using ElementAct = tfloat32_t; using ElementOut = float; using TiledMma = TiledMMA< MMA_Atom<SM80_16x8x8_F32TF32TF32F32_TN>, Layout<Shape<_2,_2,_1>>, Tile<_32,_32,Underscore>>; static constexpr int MaxThreadsPerBlock = size(TiledMma{}); static constexpr int MinBlocksPerMultiprocessor = 1; union SharedStorage { struct { ElementFlt sAMatrix[size(TileM{}) * size(TileK{}) * size(PIPE{})]; ElementAct sBMatrix[size(TileN{}) * size(TileK{}) * size(PIPE{})]; } mainloop; struct { ElementOut sCMatrix[size(TileM{}) * size(TileN{})]; } epilogue; }; // // Stencil tensor // using GmemLayoutFlt = decltype(make_ordered_layout( Shape< K, Shape< C, T, R, S>>{}, tuple<_4, 
tuple<_0,_3,_2,_1>>{})); // We have 64 elements * 32b each in the major mode that we can vectorize // Max vector size is 128b, so lay 16 threads along the major mode with a vector size of 4 // Rest along the minor mode using GmemTiledCopyFlt = decltype(make_tiled_copy( Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, ElementFlt>{}, Layout<Shape <_16, _8>, Stride< _8, _1>>{}, Layout<Shape < _1, _4>>{})); // Following layout is also correct, but trades off dynamic strides in the slice for bank conflict free accesses // using SmemLayoutFlt = decltype( // composition(Swizzle<3,2,3>{}, // make_ordered_layout( // Shape<TileSizeM,TileSizeK,PIPE>{}, // tuple< _1, _0, _2>{}))); using SmemLayoutAtomFlt = decltype( composition(Swizzle<1,2,3>{}, Layout<Shape <_8,Shape <_4, _2>>, Stride<_4,Stride<_1,_32>>>{})); using SmemCopyAtomFlt = Copy_Atom<SM75_U32x4_LDSM_N, ElementFlt>; // // Activation tensor // // Activation tensor is major in the contraction mode, so vectorize that mode first // Then lay out the rest of the threads along the other mode using GmemTiledCopyAct = decltype(make_tiled_copy( Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, ElementAct>{}, Layout<Shape <_16, _8>, Stride< _8, _1>>{}, Layout<Shape < _1, _4>>{})); // Following layout is also correct, but trades off dynamic strides in the slice for bank conflict free accesses // using SmemLayoutAct = decltype( // composition(Swizzle<3,2,3>{}, // make_ordered_layout( // Shape<TileSizeN,TileSizeK,PIPE>{}, // tuple< _1, _0, _2>{}))); using SmemLayoutAtomAct = decltype( composition(Swizzle<1,2,3>{}, Layout<Shape <_8,Shape <_4, _2>>, Stride<_4,Stride<_1,_32>>>{})); using SmemCopyAtomAct = Copy_Atom<SM75_U32x4_LDSM_N, ElementAct>; // // Output tensor // using GmemTiledCopyOut = decltype(make_tiled_copy( Copy_Atom<UniversalCopy<uint128_t>, ElementAct>{}, Layout<Shape <_8, _16>, Stride<_1, _8>>{}, Layout<Shape <_4, _1>>{})); using SmemCopyAtomOut = Copy_Atom<UniversalCopy<uint32_t>, ElementOut>; // This can be optimized to make accesses BCF, but we use a col-major layout here to show off composability using SmemLayoutOut = Layout<Shape<TileSizeM, TileSizeN>>; // // Conv functor // template <class EngineFlt, class TensorActivation, class TensorOutput> void __device__ operator()(cute::Tensor<EngineFlt, GmemLayoutFlt> mFlt, // ( K, (C,T,R,S)) TensorActivation mAct, // ((N,Z,P,Q), (C,T,R,S)) TensorOutput mOut, // ( K, (N,Z,P,Q)) char* smem_buf) const { using namespace cute; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveMma< cutlass::gemm::MainloopSm80CpAsyncUnpredicated<PIPE::value>, Shape<TileM,TileN,TileK>, ElementFlt, Underscore, // Ignore the stride, we are passing full cute::Tensor to operator() ElementAct, Underscore, // Ignore the stride, we are passing full cute::Tensor to operator() TiledMma, GmemTiledCopyFlt, SmemLayoutAtomFlt, SmemCopyAtomFlt, cute::identity, GmemTiledCopyAct, SmemLayoutAtomAct, SmemCopyAtomAct, cute::identity>; TiledMma tiled_mma; Tensor accum = partition_fragment_C(tiled_mma, TilerOut{}); clear(accum); // Set up tensors // NOTE: blockIdx.x projects onto act-NDHW mode, y along the flt-K mode for the sake of higher dynamic range in NDHW Tensor gA_mk = local_tile(mFlt, TilerFlt{}, make_coord(_,_)); // (BLK_M,BLK_K,m',k') Tensor gB_nk = local_tile(mAct, TilerAct{}, make_coord(_,_)); // (BLK_N,BLK_K,n',_1) Tensor gC_mn = local_tile(mOut, TilerOut{}, make_coord(_,_)); // (BLK_M,BLK_N,m',n') // Compute m_coord and n_coord with their post-tiled shapes auto m_coord = idx2crd(int(blockIdx.y), 
shape<2>(gA_mk)); auto n_coord = idx2crd(int(blockIdx.x), shape<2>(gB_nk)); Tensor gA = gA_mk(_,_,m_coord,_); // (BLK_M,BLK_K,k') Tensor gB = gB_nk(_,_,n_coord,_); // (BLK_N,BLK_K,_1) Tensor gC = gC_mn(_,_,m_coord,n_coord); // (BLK_M,BLK_N) auto k_tile_iter = cute::make_coord_iterator(size<2>(gA)); int k_tile_count = size<2>(gA); CollectiveMainloop collective_mma; collective_mma( accum, gA, gB, accum, k_tile_iter, k_tile_count, Underscore{}, // no residue since we do not support predication threadIdx.x, smem_buf); // // Epilogue // SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf); Tensor sC = make_tensor(make_smem_ptr(&storage.epilogue.sCMatrix[0]), SmemLayoutOut{}); auto smem_tiled_copy_C = make_tiled_copy_C(SmemCopyAtomOut{}, tiled_mma); auto smem_thr_copy_C = smem_tiled_copy_C.get_slice(threadIdx.x); auto tCrC = smem_thr_copy_C.retile_S(accum); auto tCsC = smem_thr_copy_C.partition_D(sC); copy(smem_tiled_copy_C, tCrC, tCsC); __syncthreads(); GmemTiledCopyOut gmem_tiled_copy_C; auto gmem_thr_copy_C = gmem_tiled_copy_C.get_slice(threadIdx.x); auto tDsC = gmem_thr_copy_C.partition_S(sC); auto tDgC = gmem_thr_copy_C.partition_D(gC); copy(gmem_tiled_copy_C, tDsC, tDgC); #if 0 if (thread0()) { print("mAct = "); print(mAct); print('\n'); print("mFlt = "); print(mFlt); print('\n'); print("mOut = "); print(mOut); print('\n'); print("gA = "); print(gA); print('\n'); print("gB = "); print(gB); print('\n'); print("gC = "); print(gC); print('\n'); print("sA = "); print(sA.layout()); print('\n'); print("sB = "); print(sB.layout()); print('\n'); print("sC = "); print(sC.layout()); print('\n'); print("tAgA = "); print(tAgA.layout()); print('\n'); print("tBgB = "); print(tBgB.layout()); print('\n'); print("tAsA = "); print(tAsA.layout()); print('\n'); print("tBsB = "); print(tBsB.layout()); print('\n'); print("tCsA = "); print(tCsA.layout()); print('\n'); print("tCsB = "); print(tCsB.layout()); print('\n'); print("tCrC = "); print(tCrC.layout()); print('\n'); print("tCsC = "); print(tCsC.layout()); print('\n'); print("tDsC = "); print(tDsC.layout()); print('\n'); print("tDgC = "); print(tDgC.layout()); print('\n'); print("gmem tiled copy A = "); print(gmem_tiled_copy_A); print('\n'); print("gmem tiled copy B = "); print(gmem_tiled_copy_B); print('\n'); print("gmem tiled copy C = "); print(gmem_tiled_copy_C); print('\n'); print("k_tile_count = "); print(size<2>(gA)); print('\n'); print("k_tile_iter = "); print(*k_tile_iter); print('\n'); print("K_BLOCK_MAX = "); print(K_BLOCK_MAX); print('\n'); } #endif } }; template <class TensorFlt, class TensorAct, class TensorOut> inline int fprop_reference( TensorFlt mStencil, // Logical MK: ( K, (C,T,R,S)) TensorAct mActivation, // Logical NK: ((N,Z,P,Q), (C,T,R,S)) TensorOut mOutput, // Logical MN: ( K, (N,Z,P,Q)) TensorOut mOutputRef) { int32_t N = size<1,0>(mOutputRef); int32_t Z = size<1,1>(mOutputRef); int32_t P = size<1,2>(mOutputRef); int32_t Q = size<1,3>(mOutputRef); int32_t T = size<1,3>(mStencil); int32_t R = size<1,2>(mStencil); int32_t S = size<1,1>(mStencil); int32_t C = size<1,0>(mStencil); size_t K = static_cast<size_t>(size<0>(mOutputRef)); size_t NZPQ = static_cast<size_t>(size<1>(mOutputRef)); size_t CTRS = static_cast<size_t>(size<1>(mStencil)); #if defined(_OPENMP) #pragma omp parallel for #endif for (size_t logical_m = 0; logical_m < K; ++logical_m) { for (size_t logical_n = 0; logical_n < NZPQ; ++logical_n) { auto accumulator = float(0); for (size_t logical_k = 0; logical_k < CTRS; ++logical_k) { accumulator += 
mStencil(logical_m, logical_k) * mActivation(logical_n, logical_k); } mOutputRef(logical_m, logical_n) = accumulator; } } return print_relative_error(mOutput, mOutputRef, /*print_verbose*/ false, /*print_error*/ true, /*error_margin*/ 0.01); }
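The stencil tensor above is described with `make_ordered_layout`, where the order tuple `(4, (0,3,2,1))` makes C the fastest-varying mode and K the slowest. A small host-side sketch (assuming CuTe is on the include path; the printed strides in the comment are inferred from the static shape, not captured from a run) shows what that layout resolves to:

```cpp
#include <cute/tensor.hpp>

int main() {
  using namespace cute;
  // Same shape/order as GmemLayoutFlt above: ( K, (C,T,R,S)) with K = 128, C = 64, T = R = S = 3.
  auto flt_layout = make_ordered_layout(
      Shape<_128, Shape<_64, _3, _3, _3>>{},
      tuple<_4, tuple<_0, _3, _2, _1>>{});  // C fastest (order 0), then S, R, T; K slowest (order 4)
  print(flt_layout); print("\n");
  // Expected: (_128,(_64,_3,_3,_3)):(_1728,(_1,_576,_192,_64))
  return 0;
}
```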
cutlass/examples/59_ampere_gather_scatter_conv/ampere_conv_kernel.h/0
{ "file_path": "cutlass/examples/59_ampere_gather_scatter_conv/ampere_conv_kernel.h", "repo_id": "cutlass", "token_count": 5630 }
16
<jupyter_start><jupyter_text>Exporting a CUTLASS grouped GEMM kernel to a PyTorch CUDA extension. This notebook walks through a basic example of using the CUTLASS Python interface to declare a grouped GEMM kernel and export it as a PyTorch CUDA extension. Note that GEMM and Conv2d can also be exported as PyTorch CUDA extensions. [](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/02_pytorch_extension_grouped_gemm.ipynb) Prerequisites for running on Colab. This notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface and PyTorch. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass torch --extra-index-url https://download.pytorch.org/whl/cu121<jupyter_output><empty_output><jupyter_text>Background on grouped GEMM. Grouped GEMM enables one to execute a set of GEMMs (each with potentially different sizes and strides) in a single CUDA kernel. It can be thought of as a generalized version of a pointer-array GEMM, without the requirement that the sizes and strides of each GEMM be the same. For example, if one has `p` GEMMs with sizes: ```text M_1 x N_1 x K_1 M_2 x N_2 x K_2 ... M_p x N_p x K_p ``` CUTLASS's grouped GEMM will execute these in a single CUDA kernel. Grouped GEMM is particularly beneficial for saturating the GPU with many small problems that would insufficiently utilize the device in isolation. Declaring a grouped GEMM via the CUTLASS Python interface. A grouped GEMM operation is declared similarly to a GEMM operation in the CUTLASS Python interface: one simply calls `cutlass.op.GroupedGemm`.<jupyter_code>import cutlass import torch dtype = torch.float16 plan = cutlass.op.GroupedGemm(element=dtype, layout=cutlass.LayoutType.RowMajor)<jupyter_output><empty_output><jupyter_text>We can then compile and run this operation on a group of GEMMs. We'll first set up some utility functions to initialize GEMMs.<jupyter_code>import random random.seed(2023) # Utility function to initialize A, B, C, and D matrices corresponding to dimensions M, N, and K def initialize(dtype, M, N, K): sizes = [(M, K), (K, N), (M, N), (M, N)] return [torch.randint(-3, 3, size, device='cuda').to(dtype) for size in sizes] # Utility function to generate `problems` GEMMs of random sizes def generate_problems(problems): valid_sizes = [128, 256, 512, 1024] As, Bs, Cs, Ds = [], [], [], [] for _ in range(problems): M, N, K = [random.choice(valid_sizes) for _ in range(3)] A, B, C, D = initialize(dtype, M, N, K) As.append(A) Bs.append(B) Cs.append(C) Ds.append(D) return As, Bs, Cs, Ds<jupyter_output><empty_output><jupyter_text>We'll next run a group of 20 GEMMs via the CUTLASS Python interface and via PyTorch.<jupyter_code>As, Bs, Cs, Ds, = generate_problems(20) plan.run(As, Bs, Cs, Ds, print_module=True) Ds_torch = [a @ b for a, b in zip(As, Bs)] for d, d_torch in zip(Ds, Ds_torch): assert torch.allclose(d, d_torch)<jupyter_output><empty_output><jupyter_text>Exporting the CUTLASS kernel to a PyTorch CUDA extension. The procedure above allows one to quickly experiment with using CUTLASS kernels. However, one might prefer to use the CUTLASS kernel via a [PyTorch CUDA extension](https://pytorch.org/tutorials/advanced/cpp_extension.html).
This avoids adding any runtime overhead associated with the Python portions of the CUTLASS Python interface. The CUTLASS Python interface provides simple solutions for creating PyTorch CUDA extensions for a CUTLASS kernel. These extensions can either be written out for later "ahead-of-time" compilation, or be just-in-time compiled and returned to the user. To create a JIT-compiled module from the CUTLASS kernel we defined above, simply call the following:<jupyter_code>op = plan.construct() grouped_gemm = cutlass.emit.pytorch(op, name='grouped_gemm', cc=plan.cc, sourcedir='out', jit=True)<jupyter_output><empty_output><jupyter_text>The `cutlass.emit.pytorch` function emits: * `out/grouped_gemm_kernel.cu`: This file contains the declaration of the CUTLASS kernel and a method to call it from PyTorch tensors * `out/grouped_gemm.cpp`: This file contains a C++ wrapper around the aforementioned CUTLASS kernel * `setup.py`: This file contains the `setuptools` script for building and installing the generated extension. The extension can be built from within the `out` directory by running: ```bash TORCH_CUDA_ARCH_LIST="8.0" python setup.py install ``` where `TORCH_CUDA_ARCH_LIST` is set to the compute capability of the device on which the kernel will be run. See the PyTorch ["Custom C++ and CUDA Extensions"](https://pytorch.org/tutorials/advanced/cpp_extension.html) tutorial for more details on this. The PyTorch CUDA extension could be built for this module by running: ```bash cd out TORCH_CUDA_ARCH_LIST="8.0" python setup.py ``` (assuming that one is building for SM80). One could then use the kernel in a later PyTorch module by running: ```python import torch import grouped_gemm grouped_gemm.run(As, Bs) ``` In this case, however, we set `jit=True`, which specifies that we would like to compile and load the PyTorch CUDA extension on the fly. Under the hood, this leverages the [torch.utils.cpp_extension.load](https://pytorch.org/tutorials/advanced/cpp_extension.html) method and returns the loaded extension. We can then use the extension and compare its results to running the GEMMs via vanilla PyTorch GEMMs:<jupyter_code>Ds = grouped_gemm.run(As, Bs) Ds_torch = [a @ b for a, b in zip(As, Bs)] for d, d_torch in zip(Ds, Ds_torch): assert torch.allclose(d, d_torch)<jupyter_output><empty_output><jupyter_text>Finally, we can profile our grouped GEMM extension:<jupyter_code>num_warmup = 20 num_profile = 100 # Warmup iterations for _ in range(num_warmup): Ds = grouped_gemm.run(As, Bs) Ds_torch = [a @ b for a, b in zip(As, Bs)] torch.cuda.synchronize() # Timing iterations import time grouped = 0 nongrouped = 0 for _ in range(num_profile): start = time.time() Ds = grouped_gemm.run(As, Bs) torch.cuda.synchronize() grouped += time.time() - start start = time.time() Ds_torch = [a @ b for a, b in zip(As, Bs)] torch.cuda.synchronize() nongrouped += time.time() - start print('Grouped: {:.3f} us'.format(grouped * 1e6/num_profile)) print('Non-Grouped: {:.3f} us'.format(nongrouped * 1e6/num_profile)) print('Speedup: {:.3f}'.format(nongrouped / grouped))<jupyter_output><empty_output>
cutlass/examples/python/02_pytorch_extension_grouped_gemm.ipynb/0
{ "file_path": "cutlass/examples/python/02_pytorch_extension_grouped_gemm.ipynb", "repo_id": "cutlass", "token_count": 2266 }
17
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> // Config #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && \ ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)))) # define CUTE_ARCH_CLUSTER_SM90_ENABLED #endif #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && (__CUDACC_VER_MAJOR__ >= 12)) # define CUTE_ARCH_ELECT_ONE_SM90_ENABLED #endif namespace cute { CUTE_DEVICE void cluster_arrive_relaxed() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) asm volatile("barrier.cluster.arrive.relaxed.aligned;\n" : : ); #else CUTE_INVALID_CONTROL_PATH("CUTE_ARCH_CLUSTER_SM90_ENABLED is not defined"); #endif } CUTE_DEVICE void cluster_arrive() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) asm volatile("barrier.cluster.arrive.aligned;\n" : : ); #else CUTE_INVALID_CONTROL_PATH("CUTE_ARCH_CLUSTER_SM90_ENABLED is not defined"); #endif } CUTE_DEVICE void cluster_wait() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) asm volatile("barrier.cluster.wait.aligned;\n" : : ); #else CUTE_INVALID_CONTROL_PATH("CUTE_ARCH_CLUSTER_SM90_ENABLED is not defined"); #endif } CUTE_DEVICE void cluster_sync() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) cluster_arrive(); cluster_wait(); #else CUTE_INVALID_CONTROL_PATH("CUTE_ARCH_CLUSTER_SM90_ENABLED is not defined"); #endif } // Returns the dim3 grid size in terms of number of clusters. CUTE_DEVICE dim3 cluster_grid_dims() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) uint32_t x, y, z; asm volatile("mov.u32 %0, %%nclusterid.x;\n" : "=r"(x) : ); asm volatile("mov.u32 %0, %%nclusterid.y;\n" : "=r"(y) : ); asm volatile("mov.u32 %0, %%nclusterid.z;\n" : "=r"(z) : ); return {x, y, z}; #elif defined(__CUDA_ARCH__) // MSVC requires protecting use of gridDim with __CUDA_ARCH__. 
return gridDim; #elif defined(_MSC_VER) CUTE_INVALID_CONTROL_PATH("cluster_grid_dims() can only be called on device"); return {0, 0, 0}; #else return {0, 0, 0}; #endif } // Returns the dim3 cluster rank in the grid. CUTE_DEVICE dim3 cluster_id_in_grid() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) uint32_t x, y, z; asm volatile("mov.u32 %0, %%clusterid.x;\n" : "=r"(x) : ); asm volatile("mov.u32 %0, %%clusterid.y;\n" : "=r"(y) : ); asm volatile("mov.u32 %0, %%clusterid.z;\n" : "=r"(z) : ); return {x, y, z}; #elif defined(__CUDA_ARCH__) // MSVC requires protecting use of blockIdx with __CUDA_ARCH__. return blockIdx; #elif defined(_MSC_VER) CUTE_INVALID_CONTROL_PATH("cluster_id_in_grid() can only be called on device"); return {0, 0, 0}; #else return {0, 0, 0}; #endif } // Returns the relative dim3 block rank local to the cluster. CUTE_DEVICE dim3 block_id_in_cluster() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) uint32_t x, y, z; asm volatile("mov.u32 %0, %%cluster_ctaid.x;\n" : "=r"(x) : ); asm volatile("mov.u32 %0, %%cluster_ctaid.y;\n" : "=r"(y) : ); asm volatile("mov.u32 %0, %%cluster_ctaid.z;\n" : "=r"(z) : ); return {x, y, z}; #else return {0,0,0}; #endif } // Returns the dim3 cluster shape. CUTE_DEVICE dim3 cluster_shape() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) uint32_t x, y, z; asm volatile("mov.u32 %0, %%cluster_nctaid.x;\n" : "=r"(x) : ); asm volatile("mov.u32 %0, %%cluster_nctaid.y;\n" : "=r"(y) : ); asm volatile("mov.u32 %0, %%cluster_nctaid.z;\n" : "=r"(z) : ); return {x, y, z}; #else return {1,1,1}; #endif } // Get 1D ctaid in a cluster. CUTLASS_DEVICE uint32_t block_rank_in_cluster() { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) uint32_t rank; asm volatile("mov.u32 %0, %%cluster_ctarank;\n" : "=r"(rank) :); return rank; #else return 0; #endif } // Set the destination block-ID in cluster for a given SMEM Address CUTLASS_DEVICE uint32_t set_block_rank(uint32_t smemAddr, uint32_t rank) { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) uint32_t result; asm volatile("mapa.shared::cluster.u32 %0, %1, %2;\n" : "=r"(result) : "r"(smemAddr), "r"(rank)); return result; #else return smemAddr; #endif } // Elect one thread in the warp. The elected thread gets its predicate set to true, all others obtain false. 
CUTE_HOST_DEVICE uint32_t elect_one_sync() { #if defined(CUTE_ARCH_ELECT_ONE_SM90_ENABLED) uint32_t pred = 0; uint32_t laneid = 0; asm volatile( "{\n" ".reg .b32 %%rx;\n" ".reg .pred %%px;\n" " elect.sync %%rx|%%px, %2;\n" "@%%px mov.s32 %1, 1;\n" " mov.s32 %0, %%rx;\n" "}\n" : "+r"(laneid), "+r"(pred) : "r"(0xFFFFFFFF)); return pred; #elif defined(__CUDA_ARCH__) return (threadIdx.x % 32) == 0; #else return true; #endif } struct ElectOneLaneIdReturnType { uint32_t is_leader; uint32_t leader_lane_id; }; CUTE_HOST_DEVICE ElectOneLaneIdReturnType elect_one_leader_sync() { #if defined(CUTE_ARCH_ELECT_ONE_SM90_ENABLED) uint32_t pred = 0; uint32_t laneid = 0; asm volatile( "{\n" ".reg .b32 %%rx;\n" ".reg .pred %%px;\n" " elect.sync %%rx|%%px, %2;\n" "@%%px mov.s32 %1, 1;\n" " mov.s32 %0, %%rx;\n" "}\n" : "+r"(laneid), "+r"(pred) : "r"(0xFFFFFFFF)); return {pred, laneid}; #elif defined(__CUDA_ARCH__) return {(threadIdx.x % 32) == 0, 0}; #else return {true, 0}; #endif } // Store value to remote shared memory in the cluster CUTE_DEVICE void store_shared_remote(uint32_t value, uint32_t smem_addr, uint32_t mbarrier_addr, uint32_t dst_cta_rank) { #if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED) uint32_t dsmem_addr = set_block_rank(smem_addr, dst_cta_rank); uint32_t remote_barrier_addr = set_block_rank(mbarrier_addr, dst_cta_rank); asm volatile("st.async.shared::cluster.mbarrier::complete_tx::bytes.u32 [%0], %1, [%2];" : : "r"(dsmem_addr), "r"(value), "r"(remote_barrier_addr)); #endif } } // end namespace cute
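A short device-side sketch of how these helpers are typically combined; the kernel name is illustrative, and it assumes a Hopper (sm_90) build launched with a non-trivial cluster shape (for example via `cudaLaunchKernelEx` with the cluster-dimension attribute):

```cpp
#include <cstdio>
#include <cute/arch/cluster_sm90.hpp>

__global__ void cluster_demo_kernel() {
  // Query this CTA's rank within its cluster and the cluster shape via the PTX special registers.
  dim3 cta   = cute::block_id_in_cluster();
  dim3 shape = cute::cluster_shape();

  // Let a single elected lane per warp report, using the elect.sync-backed helper.
  if (cute::elect_one_sync()) {
    printf("CTA (%u,%u,%u) of cluster shape (%u,%u,%u)\n",
           cta.x, cta.y, cta.z, shape.x, shape.y, shape.z);
  }

  // Rendezvous across the whole cluster before any cross-CTA shared-memory traffic.
  cute::cluster_sync();
}
```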
cutlass/include/cute/arch/cluster_sm90.hpp/0
{ "file_path": "cutlass/include/cute/arch/cluster_sm90.hpp", "repo_id": "cutlass", "token_count": 3131 }
18
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/numeric/integer_sequence.hpp> #if defined(__clang__) && defined(__CUDA__) // __cvta_generic_to_shared was added in Clang 14: https://reviews.llvm.org/D111665 #if __clang_major__ >= 14 #define CUTE_CLANG_SUPPORTS_CVTA_GENERIC_TO_SHARED 1 #endif // __nvvm_get_smem_pointer added in Clang 14: https://reviews.llvm.org/D111665 // ... 
but will not work on Windows until Clang 15: https://reviews.llvm.org/D122897 #if (!defined(_WIN32) && __clang_major__ >= 14) || __clang_major__ >= 15 #define CUTE_CLANG_SUPPORTS_NVVM_GET_SMEM_POINTER 1 #endif #endif #if defined(__NVCC__) || defined(__CUDACC_RTC__) // __cvta_generic_to_shared added in CUDA 11+ #if __CUDACC_VER_MAJOR__ >= 11 #define CUTE_NVCC_SUPPORTS_CVTA_GENERIC_TO_SHARED 1 #endif // __nvvm_get_smem_pointer added in CUDA 10.2 #if __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2 #define CUTE_NVCC_SUPPORTS_NVVM_GET_SMEM_POINTER 1 #endif #endif #if CUTE_NVCC_SUPPORTS_CVTA_GENERIC_TO_SHARED || CUTE_CLANG_SUPPORTS_CVTA_GENERIC_TO_SHARED #define CUTE_CVTA_GENERIC_TO_SHARED_SUPPORTED 1 #endif #if !defined(CUTE_CVTA_GENERIC_TO_SHARED_ACTIVATED) && CUTE_CVTA_GENERIC_TO_SHARED_SUPPORTED && defined(__CUDA_ARCH__) #define CUTE_CVTA_GENERIC_TO_SHARED_ACTIVATED 1 #endif #if CUTE_NVCC_SUPPORTS_NVVM_GET_SMEM_POINTER || CUTE_CLANG_SUPPORTS_NVVM_GET_SMEM_POINTER #define CUTE_NVVM_GET_SMEM_POINTER_SUPPORTED 1 #endif #if !defined(CUTE_NVVM_GET_SMEM_POINTER_ACTIVATED) && CUTE_NVVM_GET_SMEM_POINTER_SUPPORTED && defined(__CUDA_ARCH__) #define CUTE_NVVM_GET_SMEM_POINTER_ACTIVATED 1 #endif // Clang 14+ provides a declaration of __nvvm_get_smem_pointer, so we only need // to provide one for NVCC #if CUTE_NVCC_SUPPORTS_NVVM_GET_SMEM_POINTER extern "C" { // This NVVM intrinsic is subject to change in future versions of CUDA. // Clients should not call it directly. CUTE_DEVICE uint32_t __nvvm_get_smem_pointer(void*); } #endif namespace cute { /// CUTE helper to cast SMEM pointer to unsigned CUTE_DEVICE uint32_t cast_smem_ptr_to_uint(void const* const ptr) { // We prefer to use the new CVTA intrinsics if they are available, otherwise we will fall back to // the previous internal intrinsics if they are available. #if CUTE_CVTA_GENERIC_TO_SHARED_ACTIVATED // // This NVVM intrinsic converts an address in shared memory to a plain // unsigned integer. This is necessary to pass to shared memory instructions // in inline PTX. // // In CUDA 11 and beyond, this replaces __nvvm_get_smem_pointer() [only available in 10.2]. // //__device__ size_t __cvta_generic_to_shared(void* ptr); /// CUTE helper to get SMEM pointer return static_cast<uint32_t>(__cvta_generic_to_shared(ptr)); #elif CUTE_NVVM_GET_SMEM_POINTER_ACTIVATED return __nvvm_get_smem_pointer(ptr); #elif defined(__CUDA_ARCH__) uint32_t smem_ptr; asm( "{ .reg .u64 smem_ptr; cvta.to.shared.u64 smem_ptr, %1; cvt.u32.u64 %0, smem_ptr; }\n" : "=r"(smem_ptr) : "l"(ptr)); return smem_ptr; #else (void) ptr; printf("ERROR: cast_smem_ptr_to_uint not supported but used.\n"); return 0; #endif } namespace detail { // // Wrapper for MMAOp::fma // template <class MmaOp> struct CallFMA { template <class... Args> CUTE_HOST_DEVICE constexpr void operator()(Args&&... args) const { return MmaOp::fma(static_cast<Args&&>(args)...); } }; // // Wrapper for CopyOp::copy // template <class CopyOp> struct CallCOPY { template <class... Args> CUTE_HOST_DEVICE constexpr void operator()(Args&&... args) const { return CopyOp::copy(static_cast<Args&&>(args)...); } }; // // Utility for exploding pointers/arrays/tensors into functions // template <class Fn, class PtrA, int... I> CUTE_HOST_DEVICE constexpr void explode(Fn fn, PtrA&& a, int_sequence<I...>) { return fn(a[I]...); } template <class Fn, class PtrS, int... Is, class PtrD, int... 
Id> CUTE_HOST_DEVICE constexpr void explode(Fn fn, PtrS&& s, int_sequence<Is...>, PtrD&& d, int_sequence<Id...>) { return fn(s[Is]..., d[Id]...); } template <class Fn, class PtrA, int... Ia, class PtrB, int... Ib, class PtrC, int... Ic> CUTE_HOST_DEVICE constexpr void explode(Fn fn, PtrA&& a, int_sequence<Ia...>, PtrB&& b, int_sequence<Ib...>, PtrC&& c, int_sequence<Ic...>) { return fn(a[Ia]..., b[Ib]..., c[Ic]...); } template <class Fn, class PtrD, int... Id, class PtrA, int... Ia, class PtrB, int... Ib, class PtrC, int... Ic> CUTE_HOST_DEVICE constexpr void explode(Fn fn, PtrD&& d, int_sequence<Id...>, PtrA&& a, int_sequence<Ia...>, PtrB&& b, int_sequence<Ib...>, PtrC&& c, int_sequence<Ic...>) { return fn(d[Id]..., a[Ia]..., b[Ib]..., c[Ic]...); } template <class Fn, class PtrD, int... Id, class PtrA, int... Ia, class PtrB, int... Ib, class PtrC, int... Ic, class PtrE, int... Ie> CUTE_HOST_DEVICE constexpr void explode(Fn fn, PtrD&& d, int_sequence<Id...>, PtrA&& a, int_sequence<Ia...>, PtrB&& b, int_sequence<Ib...>, PtrC&& c, int_sequence<Ic...>, PtrE&& e, int_sequence<Ie...>) { return fn(d[Id]..., a[Ia]..., b[Ib]..., c[Ic]..., e[Ie]...); } template <class Fn, class PtrD, int... Id, class PtrA, int... Ia, class PtrB, int... Ib, class PtrC, int... Ic, class PtrSFA, int... Isfa, class PtrSFB, int... Isfb> CUTE_HOST_DEVICE constexpr void explode(Fn fn, PtrD&& d, int_sequence<Id...>, PtrA&& a, int_sequence<Ia...>, PtrB&& b, int_sequence<Ib...>, PtrC&& c, int_sequence<Ic...>, PtrSFA&& sfa, int_sequence<Isfa...>, PtrSFB&& sfb, int_sequence<Isfb...>) { return fn(d[Id]..., a[Ia]..., b[Ib]..., c[Ic]..., sfa[Isfa]..., sfb[Isfb]...); } // // Utility for exploding tuples into functions // template <class Fn, class TupleA, int... I> CUTE_HOST_DEVICE constexpr void explode_tuple(Fn fn, TupleA&& a, int_sequence<I...>) { return fn(get<I>(a)...); } template <class Fn, class TupleA, int... Ia, class TupleB, int... Ib> CUTE_HOST_DEVICE constexpr void explode_tuple(Fn fn, TupleA&& a, int_sequence<Ia...>, TupleB&& b, int_sequence<Ib...>) { return fn(get<Ia>(a)..., get<Ib>(b)...); } template <class Fn, class TupleA, int... Ia, class TupleB, int... Ib, class TupleC, int... Ic> CUTE_HOST_DEVICE constexpr void explode_tuple(Fn fn, TupleA&& a, int_sequence<Ia...>, TupleB&& b, int_sequence<Ib...>, TupleC&& c, int_sequence<Ic...>) { return fn(get<Ia>(a)..., get<Ib>(b)..., get<Ic>(c)...); } } // end namespace detail } // end namespace cute
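The `detail::explode` overloads above are what let CuTe pass register fragments to an MMA operation whose `fma()` takes a flat list of scalars. A hedged sketch of that pattern follows; `MyMmaOp` and the fragment extents (4/2/1/4, resembling an m16n8k8 F16-in/F32-accumulate op) are illustrative assumptions, not a specific CUTLASS API:

```cpp
#include <cute/arch/util.hpp>

template <class MyMmaOp, class RegD, class RegA, class RegB, class RegC>
CUTE_HOST_DEVICE void call_fma_unpacked(RegD* d, RegA const* a, RegB const* b, RegC const* c) {
  // CallFMA wraps MyMmaOp::fma; explode indexes each pointer with its int_sequence
  // and forwards the resulting scalars as one flat argument list.
  cute::detail::explode(cute::detail::CallFMA<MyMmaOp>{},
                        d, cute::make_int_sequence<4>{},   // d[0]..d[3]
                        a, cute::make_int_sequence<2>{},   // a[0], a[1]
                        b, cute::make_int_sequence<1>{},   // b[0]
                        c, cute::make_int_sequence<4>{});  // c[0]..c[3]
  // Equivalent to:
  //   MyMmaOp::fma(d[0], d[1], d[2], d[3], a[0], a[1], b[0], c[0], c[1], c[2], c[3]);
}
```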
cutlass/include/cute/arch/util.hpp/0
{ "file_path": "cutlass/include/cute/arch/util.hpp", "repo_id": "cutlass", "token_count": 3703 }
19
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cstdint> #else #include <cstdint> #endif #include <cutlass/numeric_types.h> namespace cute { // // Signed integers // using int2_t = cutlass::int2b_t; using int4_t = cutlass::int4b_t; using CUTE_STL_NAMESPACE::int8_t; using CUTE_STL_NAMESPACE::int16_t; using CUTE_STL_NAMESPACE::int32_t; using CUTE_STL_NAMESPACE::int64_t; template <int N> struct int_bit; template <> struct int_bit< 2> { using type = cutlass::int2b_t; }; template <> struct int_bit< 4> { using type = cutlass::int4b_t; }; template <> struct int_bit< 8> { using type = int8_t; }; template <> struct int_bit< 16> { using type = int16_t; }; template <> struct int_bit< 32> { using type = int32_t; }; template <> struct int_bit< 64> { using type = int64_t; }; template <int N> using int_bit_t = typename int_bit<N>::type; template <int N> using int_byte = int_bit<8*N>; template <int N> using int_byte_t = typename int_byte<N>::type; // // Unsigned integers // using uint1_t = cutlass::uint1b_t; using uint2_t = cutlass::uint2b_t; using uint4_t = cutlass::uint4b_t; using CUTE_STL_NAMESPACE::uint8_t; using CUTE_STL_NAMESPACE::uint16_t; using CUTE_STL_NAMESPACE::uint32_t; using CUTE_STL_NAMESPACE::uint64_t; using cutlass::uint128_t; template <int N> struct uint_bit; template <> struct uint_bit< 1> { using type = cutlass::uint1b_t; }; template <> struct uint_bit< 2> { using type = cutlass::uint2b_t; }; template <> struct uint_bit< 4> { using type = cutlass::uint4b_t; }; template <> struct uint_bit< 8> { using type = uint8_t; }; template <> struct uint_bit< 16> { using type = uint16_t; }; template <> struct uint_bit< 32> { using type = uint32_t; }; template <> struct uint_bit< 64> { using type = uint64_t; }; template <> 
struct uint_bit<128> { using type = cutlass::uint128_t; }; template <int N> using uint_bit_t = typename uint_bit<N>::type; template <int N> using uint_byte = uint_bit<8*N>; template <int N> using uint_byte_t = typename uint_byte<N>::type; } // namespace cute
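A few host-side `static_assert`s (standard C++17, assuming the CUTLASS/CuTe headers are on the include path) illustrate how the bit- and byte-width maps above resolve:

```cpp
#include <cute/numeric/int.hpp>
#include <cstdint>
#include <type_traits>

static_assert(std::is_same_v<cute::uint_bit_t<32>,  std::uint32_t>,      "32-bit unsigned maps to uint32_t");
static_assert(std::is_same_v<cute::uint_bit_t<128>, cutlass::uint128_t>, "128-bit unsigned maps to cutlass::uint128_t");
static_assert(std::is_same_v<cute::uint_byte_t<2>,  std::uint16_t>,      "2 bytes maps to uint16_t");
static_assert(std::is_same_v<cute::int_bit_t<4>,    cutlass::int4b_t>,   "sub-byte widths map to CUTLASS narrow types");
```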
cutlass/include/cute/numeric/int.hpp/0
{ "file_path": "cutlass/include/cute/numeric/int.hpp", "repo_id": "cutlass", "token_count": 1299 }
20
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/container/tuple.hpp> #include <cute/algorithm/tuple_algorithms.hpp> #include <cute/numeric/integral_constant.hpp> #include <cute/numeric/integer_sequence.hpp> namespace cute { // For slicing struct Underscore : Int<0> {}; CUTE_INLINE_CONSTANT Underscore _; // Convenient alias using X = Underscore; // Treat Underscore as an integral like integral_constant template <> struct is_integral<Underscore> : true_type {}; template <class T> struct is_underscore : false_type {}; template <> struct is_underscore<Underscore> : true_type {}; // Tuple trait for detecting static member element template <class Tuple, class Elem, class Enable = void> struct has_elem : false_type {}; template <class Elem> struct has_elem<Elem, Elem> : true_type {}; template <class Tuple, class Elem> struct has_elem<Tuple, Elem, enable_if_t<is_tuple<Tuple>::value> > : has_elem<Tuple, Elem, tuple_seq<Tuple> > {}; template <class Tuple, class Elem, int... Is> struct has_elem<Tuple, Elem, seq<Is...>> : disjunction<has_elem<tuple_element_t<Is, Tuple>, Elem>...> {}; // Tuple trait for detecting static member element template <class Tuple, class Elem, class Enable = void> struct all_elem : false_type {}; template <class Elem> struct all_elem<Elem, Elem> : true_type {}; template <class Tuple, class Elem> struct all_elem<Tuple, Elem, enable_if_t<is_tuple<Tuple>::value> > : all_elem<Tuple, Elem, tuple_seq<Tuple> > {}; template <class Tuple, class Elem, int... 
Is> struct all_elem<Tuple, Elem, seq<Is...>> : conjunction<all_elem<tuple_element_t<Is, Tuple>, Elem>...> {}; // Tuple trait for detecting Underscore member template <class Tuple> using has_underscore = has_elem<Tuple, Underscore>; template <class Tuple> using all_underscore = all_elem<Tuple, Underscore>; template <class Tuple> using has_int1 = has_elem<Tuple, Int<1>>; template <class Tuple> using has_int0 = has_elem<Tuple, Int<0>>; // // Slice keeps only the elements of Tuple B that are paired with an Underscore // namespace detail { template <class A, class B> CUTE_HOST_DEVICE constexpr auto lift_slice(A const& a, B const& b) { if constexpr (is_tuple<A>::value) { static_assert(tuple_size<A>::value == tuple_size<B>::value, "Mismatched Ranks"); return filter_tuple(a, b, [](auto const& x, auto const& y) { return lift_slice(x,y); }); } else if constexpr (is_underscore<A>::value) { return cute::tuple<B>{b}; } else { return cute::tuple<>{}; } CUTE_GCC_UNREACHABLE; } } // end namespace detail // Entry point overrides the lifting so that slice(_,b) == b template <class A, class B> CUTE_HOST_DEVICE constexpr auto slice(A const& a, B const& b) { if constexpr (is_tuple<A>::value) { static_assert(tuple_size<A>::value == tuple_size<B>::value, "Mismatched Ranks"); return filter_tuple(a, b, [](auto const& x, auto const& y) { return detail::lift_slice(x,y); }); } else if constexpr (is_underscore<A>::value) { return b; } else { return cute::tuple<>{}; } CUTE_GCC_UNREACHABLE; } // // Dice keeps only the elements of Tuple B that are paired with an Int // namespace detail { template <class A, class B> CUTE_HOST_DEVICE constexpr auto lift_dice(A const& a, B const& b) { if constexpr (is_tuple<A>::value) { static_assert(tuple_size<A>::value == tuple_size<B>::value, "Mismatched Ranks"); return filter_tuple(a, b, [](auto const& x, auto const& y) { return lift_dice(x,y); }); } else if constexpr (is_underscore<A>::value) { return cute::tuple<>{}; } else { return cute::tuple<B>{b}; } CUTE_GCC_UNREACHABLE; } } // end namespace detail // Entry point overrides the lifting so that dice(1,b) == b template <class A, class B> CUTE_HOST_DEVICE constexpr auto dice(A const& a, B const& b) { if constexpr (is_tuple<A>::value) { static_assert(tuple_size<A>::value == tuple_size<B>::value, "Mismatched Ranks"); return filter_tuple(a, b, [](auto const& x, auto const& y) { return detail::lift_dice(x,y); }); } else if constexpr (is_underscore<A>::value) { return cute::tuple<>{}; } else { return b; } CUTE_GCC_UNREACHABLE; } // // Display utilities // CUTE_HOST_DEVICE void print(Underscore const&) { printf("_"); } #if !defined(__CUDACC_RTC__) CUTE_HOST std::ostream& operator<<(std::ostream& os, Underscore const&) { return os << "_"; } #endif } // end namespace cute
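A small sketch of the `slice`/`dice` semantics documented above (illustrative only; it assumes CuTe is on the include path and relies on `cute::print` for tuples, as used elsewhere in these headers):

```cpp
#include <cute/underscore.hpp>

void slice_dice_demo() {
  using namespace cute;
  auto coord = make_tuple(_, 3, _);   // Underscore in modes 0 and 2, an integer in mode 1
  auto data  = make_tuple(10, 20, 30);

  auto s = slice(coord, data);        // keeps elements of data paired with '_'  -> (10,30)
  auto d = dice(coord, data);         // keeps elements of data paired with ints -> (20)

  print(s); print("\n");
  print(d); print("\n");
}
```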
cutlass/include/cute/underscore.hpp/0
{ "file_path": "cutlass/include/cute/underscore.hpp", "repo_id": "cutlass", "token_count": 2184 }
21
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Matrix multiply for SM75 */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) // CUDA Toolkit includes for nvcuda::wmma needed for binarized matrix multiply. 
#include <mma.h> #include "cutlass/wmma_array.h" #endif // CUTLASS includes #include "cutlass/arch/mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" //////////////////////////////////////////////////////////////////////////////// #if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)) #define CUTLASS_ARCH_MMA_SM75_SUPPORTED 1 #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750)) #define CUTLASS_ARCH_MMA_SM75_ENABLED #endif #endif //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { //////////////////////////////////////////////////////////////////////////////// // // Matrix Multiply 1688 - FP16 accumulation // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation - F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<16, 8, 8>, 32, half_t, layout::RowMajor, half_t, layout::ColumnMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<16, 8, 8>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 2>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 4>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile( "mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0,%1}, {%2,%3}, {%4}, {%5,%6};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Matrix Multiply 1688 - FP32 accumulation // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<16, 8, 8>, 32, half_t, layout::RowMajor, half_t, layout::ColumnMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<16, 8, 8>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 2>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 4>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "f"(C[0]), "f"(C[1]), 
"f"(C[2]), "f"(C[3]) ); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Integer matrix multiply .8816 (8b) // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: S32 = S8 * S8 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 16>, 32, int8_t, layout::RowMajor, int8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 16>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int8_t, 4>; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int8_t, 4>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = U8 * S8 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 16>, 32, uint8_t, layout::RowMajor, int8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 16>; using ElementA = uint8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint8_t, 4>; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int8_t, 4>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = S8 * U8 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 16>, 32, int8_t, layout::RowMajor, uint8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 16>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int8_t, 4>; using ElementB = uint8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint8_t, 4>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, 
FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = U8 * U8 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 16>, 32, uint8_t, layout::RowMajor, uint8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 16>; using ElementA = uint8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint8_t, 4>; using ElementB = uint8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint8_t, 4>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Integer matrix multiply (8b) with SATURATE // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: S32 = S8 * S8 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 16>, 32, int8_t, layout::RowMajor, int8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate> { using Shape = gemm::GemmShape<8, 8, 16>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int8_t, 4>; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int8_t, 4>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAddSaturate; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.s8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = U8 * S8 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 16>, 32, uint8_t, layout::RowMajor, int8_t, layout::ColumnMajor, int,
layout::RowMajor, OpMultiplyAddSaturate> { using Shape = gemm::GemmShape<8, 8, 16>; using ElementA = uint8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint8_t, 4>; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int8_t, 4>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAddSaturate; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.u8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = S8 * U8 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 16>, 32, int8_t, layout::RowMajor, uint8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate> { using Shape = gemm::GemmShape<8, 8, 16>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int8_t, 4>; using ElementB = uint8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint8_t, 4>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAddSaturate; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.s8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = U8 * U8 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 16>, 32, uint8_t, layout::RowMajor, uint8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate> { using Shape = gemm::GemmShape<8, 8, 16>; using ElementA = uint8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint8_t, 4>; using ElementB = uint8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint8_t, 4>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAddSaturate; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.u8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) 
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Integer matrix multiply (4b) // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: S32 = S4 * S4 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 32>, 32, int4b_t, layout::RowMajor, int4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 32>; using ElementA = int4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int4b_t, 8>; using ElementB = int4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int4b_t, 8>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = U4 * S4 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 32>, 32, uint4b_t, layout::RowMajor, int4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 32>; using ElementA = uint4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint4b_t, 8>; using ElementB = int4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int4b_t, 8>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = S4 * U4 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 32>, 32, int4b_t, layout::RowMajor, uint4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 32>; using ElementA = int4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int4b_t, 8>; using ElementB = uint4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint4b_t, 8>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE 
void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = U4 * U4 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 32>, 32, uint4b_t, layout::RowMajor, uint4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 32>; using ElementA = uint4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint4b_t, 8>; using ElementB = uint4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint4b_t, 8>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Integer matrix multiply (4b) - SATURATE // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: S32 = S4 * S4 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 32>, 32, int4b_t, layout::RowMajor, int4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate> { using Shape = gemm::GemmShape<8, 8, 32>; using ElementA = int4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int4b_t, 8>; using ElementB = int4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int4b_t, 8>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAddSaturate; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = U4 * S4 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 32>, 32, uint4b_t, 
layout::RowMajor, int4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate> { using Shape = gemm::GemmShape<8, 8, 32>; using ElementA = uint4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint4b_t, 8>; using ElementB = int4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int4b_t, 8>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAddSaturate; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.u4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = S4 * U4 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 32>, 32, int4b_t, layout::RowMajor, uint4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate> { using Shape = gemm::GemmShape<8, 8, 32>; using ElementA = int4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int4b_t, 8>; using ElementB = uint4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint4b_t, 8>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAddSaturate; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.s4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; /// Matrix multiply-add operation: S32 = U4 * U4 + S32 template <> struct Mma< gemm::GemmShape<8, 8, 32>, 32, uint4b_t, layout::RowMajor, uint4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate> { using Shape = gemm::GemmShape<8, 8, 32>; using ElementA = uint4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint4b_t, 8>; using ElementB = uint4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint4b_t, 8>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpMultiplyAddSaturate; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) unsigned const & A = reinterpret_cast<unsigned const &>(a); unsigned const & B = reinterpret_cast<unsigned const &>(b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); asm 
volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.u4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // b1 ^ b1 + s32 => s32 // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template <> struct Mma< gemm::GemmShape<8,8,128>, 32, uint1b_t, layout::RowMajor, uint1b_t, layout::ColumnMajor, int, layout::RowMajor, OpXorPopc> { using Shape = gemm::GemmShape<8,8,128>; using ElementA = uint1b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint1b_t, 32>; using ElementB = uint1b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint1b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 2>; using Operator = OpXorPopc; using ArchTag = arch::Sm75; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) const { #if defined(CUTLASS_ARCH_MMA_SM75_ENABLED) #if defined(CUTLASS_ARCH_WMMA_ENABLED) using WmmaFragmentA = nvcuda::wmma::fragment< nvcuda::wmma::matrix_a, Shape::kM, Shape::kN, Shape::kK, nvcuda::wmma::experimental::precision::b1, nvcuda::wmma::row_major>; using WmmaFragmentB = nvcuda::wmma::fragment< nvcuda::wmma::matrix_b, Shape::kM, Shape::kN, Shape::kK, nvcuda::wmma::experimental::precision::b1, nvcuda::wmma::col_major>; using WmmaFragmentC = nvcuda::wmma::fragment< nvcuda::wmma::accumulator, Shape::kM, Shape::kN, Shape::kK, int>; WmmaFragmentA const & A = reinterpret_cast<WmmaFragmentA const &>(a); WmmaFragmentB const & B = reinterpret_cast<WmmaFragmentB const &>(b); WmmaFragmentC const & C = reinterpret_cast<WmmaFragmentC const &>(c); WmmaFragmentC & D = reinterpret_cast<WmmaFragmentC &>(d); nvcuda::wmma::bmma_sync(D, A, B, C, nvcuda::wmma::experimental::bmmaBitOpXOR, nvcuda::wmma::experimental::bmmaAccumulateOpPOPC); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); CUTLASS_NOT_IMPLEMENTED(); // WMMA must be supported to issue binary matrix multiply-accumulate instructions. #endif // defined(CUTLASS_ARCH_WMMA_ENABLED) #endif } }; //////////////////////////////////////////////////////////////////////////////// } // namespace arch } // namespace cutlass
cutlass/include/cutlass/arch/mma_sm75.h/0
{ "file_path": "cutlass/include/cutlass/arch/mma_sm75.h", "repo_id": "cutlass", "token_count": 13405 }
22
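As a usage sketch for the warp-level specializations in the file above (illustrative only, not part of the file): each Mma<> instance is a per-warp operation whose Fragment types state how many elements each of the 32 threads contributes. A real mainloop would populate the fragments from shared memory (for example via ldmatrix or the threadblock iterators); here they are simply zero-filled so the call shape is visible.

#include "cutlass/arch/mma_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"

__global__ void mma_m16n8k8_demo() {
  using Mma = cutlass::arch::Mma<
      cutlass::gemm::GemmShape<16, 8, 8>, 32,
      cutlass::half_t, cutlass::layout::RowMajor,
      cutlass::half_t, cutlass::layout::ColumnMajor,
      float, cutlass::layout::RowMajor,
      cutlass::arch::OpMultiplyAdd>;

  Mma::FragmentA a;   // 4 x half_t held by each thread
  Mma::FragmentB b;   // 2 x half_t held by each thread
  Mma::FragmentC c;   // 4 x float accumulators held by each thread
  Mma::FragmentC d;
  a.clear(); b.clear(); c.clear();

  // All 32 threads of the warp must execute this call together.
  Mma mma;
  mma(d, a, b, c);    // lowers to mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 on SM75+
}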
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" namespace cutlass { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Statically sized array for any data type template < typename T, int N > struct Array<T, N, false> { static constexpr int kSizeBits = sizeof_bits<T>::value * N; /// Storage type using Storage = typename platform::conditional< ((kSizeBits % 32) != 0), typename platform::conditional< ((kSizeBits % 16) != 0), uint8_t, uint16_t >::type, uint32_t >::type; /// Element type using Element = T; /// Number of logical elements per stored object static constexpr int kElementsPerStoredItem = int(sizeof(Storage) * 8) / sizeof_bits<T>::value; /// Number of storage elements static constexpr size_t kStorageElements = (N + kElementsPerStoredItem - 1) / kElementsPerStoredItem; /// Number of logical elements static constexpr size_t kElements = N; /// Bitmask for covering one item static constexpr Storage kMask = ((Storage(1) << sizeof_bits<T>::value) - 1); // // C++ standard members with pointer types removed // typedef T value_type; typedef size_t size_type; typedef ptrdiff_t difference_type; typedef value_type *pointer; typedef value_type const *const_pointer; // // References // /// Reference object inserts or extracts sub-byte items class reference { /// Pointer to storage element Storage *ptr_{nullptr}; /// Index into elements packed into Storage object int idx_{0}; public: reference() = default; /// Ctor CUTLASS_HOST_DEVICE reference(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { } /// Assignment CUTLASS_HOST_DEVICE reference &operator=(T x) { // `*ptr_ & kUpdateMask` will read ptr_ before writing to it. // This means a code pattern like // // ```cpp // Array<half_t, N> result; // result[0] = xxx; // ``` // // will lead to a compiler warning about use of an uninitialized member variable. Although we know // this read of an uninitialized member variable is harmless.
#if defined(__clang__) # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wuninitialized" #elif defined(__GNUC__) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wuninitialized" # pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif Storage item = (reinterpret_cast<Storage const &>(x) & kMask); Storage kUpdateMask = Storage(~(kMask << (idx_ * sizeof_bits<T>::value))); *ptr_ = Storage(((*ptr_ & kUpdateMask) | (item << idx_ * sizeof_bits<T>::value))); #if defined(__clang__) # pragma clang diagnostic pop #elif defined(__GNUC__) # pragma GCC diagnostic pop #endif return *this; } CUTLASS_HOST_DEVICE T get() const { Storage item = Storage((*ptr_ >> (idx_ * sizeof_bits<T>::value)) & kMask); return reinterpret_cast<T const &>(item); } /// Extract CUTLASS_HOST_DEVICE operator T() const { return get(); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } }; /// Reference object extracts sub-byte items class const_reference { /// Pointer to storage element Storage const *ptr_{nullptr}; /// Index into elements packed into Storage object int idx_{0}; public: const_reference() = default; /// Ctor CUTLASS_HOST_DEVICE const_reference(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { } CUTLASS_HOST_DEVICE const T get() const { Storage item = (*ptr_ >> (idx_ * sizeof_bits<T>::value)) & kMask; return reinterpret_cast<T const &>(item); } /// Extract CUTLASS_HOST_DEVICE operator T() const { Storage item = Storage(Storage(*ptr_ >> Storage(idx_ * sizeof_bits<T>::value)) & kMask); return reinterpret_cast<T const &>(item); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } }; // // Iterators // /// Bidirectional iterator over elements class iterator { /// Pointer to storage element Storage *ptr_{nullptr}; /// Index into elements packed into Storage object int idx_{0}; public: iterator() = default; CUTLASS_HOST_DEVICE iterator(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { } CUTLASS_HOST_DEVICE iterator &operator++() { ++idx_; if (idx_ == kElementsPerStoredItem) { ++ptr_; idx_ = 0; } return *this; } CUTLASS_HOST_DEVICE iterator &operator--() { if (!idx_) { --ptr_; idx_ = kElementsPerStoredItem - 1; } else { --idx_; } return *this; } CUTLASS_HOST_DEVICE iterator operator++(int) { iterator ret(*this); ++idx_; if (idx_ == kElementsPerStoredItem) { ++ptr_; idx_ = 0; } return ret; } CUTLASS_HOST_DEVICE iterator operator--(int) { iterator ret(*this); if (!idx_) { --ptr_; idx_ = kElementsPerStoredItem - 1; } else { --idx_; } return ret; } CUTLASS_HOST_DEVICE reference operator*() const { return reference(ptr_, idx_); } CUTLASS_HOST_DEVICE bool operator==(iterator const &other) const { return ptr_ == other.ptr_ && idx_ == other.idx_; } CUTLASS_HOST_DEVICE bool operator!=(iterator const &other) const { return !(*this == other); } }; /// Bidirectional constant iterator over elements class const_iterator { /// Pointer to storage element Storage const *ptr_{nullptr}; /// Index into elements packed into Storage object int idx_{0}; public: const_iterator() = default; CUTLASS_HOST_DEVICE const_iterator(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { } CUTLASS_HOST_DEVICE iterator &operator++() { ++idx_; if (idx_ == kElementsPerStoredItem) { ++ptr_; idx_ = 0; } return *this; 
} CUTLASS_HOST_DEVICE iterator &operator--() { if (!idx_) { --ptr_; idx_ = kElementsPerStoredItem - 1; } else { --idx_; } return *this; } CUTLASS_HOST_DEVICE iterator operator++(int) { iterator ret(*this); ++idx_; if (idx_ == kElementsPerStoredItem) { ++ptr_; idx_ = 0; } return ret; } CUTLASS_HOST_DEVICE iterator operator--(int) { iterator ret(*this); if (!idx_) { --ptr_; idx_ = kElementsPerStoredItem - 1; } else { --idx_; } return ret; } CUTLASS_HOST_DEVICE const_reference operator*() const { return const_reference(ptr_, idx_); } CUTLASS_HOST_DEVICE bool operator==(iterator const &other) const { return ptr_ == other.ptr_ && idx_ == other.idx_; } CUTLASS_HOST_DEVICE bool operator!=(iterator const &other) const { return !(*this == other); } }; /// Bidirectional iterator over elements class reverse_iterator { /// Pointer to storage element Storage *ptr_{nullptr}; /// Index into elements packed into Storage object int idx_{0}; public: reverse_iterator() = default; CUTLASS_HOST_DEVICE reverse_iterator(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { } }; /// Bidirectional constant iterator over elements class const_reverse_iterator { /// Pointer to storage element Storage const *ptr_{nullptr}; /// Index into elements packed into Storage object int idx_{0}; public: const_reverse_iterator() = default; CUTLASS_HOST_DEVICE const_reverse_iterator(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { } }; /// Efficient clear method CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < int(kStorageElements); ++i) { storage[i] = Storage(0); } } CUTLASS_HOST_DEVICE reference at(size_type pos) { return reference(storage + pos / kElementsPerStoredItem, pos % kElementsPerStoredItem); } CUTLASS_HOST_DEVICE const_reference at(size_type pos) const { return const_reference(storage + pos / kElementsPerStoredItem, pos % kElementsPerStoredItem); } CUTLASS_HOST_DEVICE reference operator[](size_type pos) { return at(pos); } CUTLASS_HOST_DEVICE const_reference operator[](size_type pos) const { return at(pos); } CUTLASS_HOST_DEVICE reference front() { return at(0); } CUTLASS_HOST_DEVICE const_reference front() const { return at(0); } CUTLASS_HOST_DEVICE reference back() { return reference(storage + kStorageElements - 1, kElementsPerStoredItem - 1); } CUTLASS_HOST_DEVICE const_reference back() const { return const_reference(storage + kStorageElements - 1, kElementsPerStoredItem - 1); } CUTLASS_HOST_DEVICE pointer data() { return reinterpret_cast<pointer>(storage); } CUTLASS_HOST_DEVICE const_pointer data() const { return reinterpret_cast<const_pointer>(storage); } CUTLASS_HOST_DEVICE Storage * raw_data() { return storage; } CUTLASS_HOST_DEVICE Storage const * raw_data() const { return storage; } CUTLASS_HOST_DEVICE constexpr bool empty() const { return !kElements; } CUTLASS_HOST_DEVICE constexpr size_type size() const { return kElements; } CUTLASS_HOST_DEVICE constexpr size_type max_size() const { return kElements; } CUTLASS_HOST_DEVICE void fill(T const &value) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerStoredItem; ++i) { reference ref(storage, i); ref = value; } CUTLASS_PRAGMA_UNROLL for (int i = 1; i < kStorageElements; ++i) { storage[i] = storage[0]; } } CUTLASS_HOST_DEVICE iterator begin() { return iterator(storage); } CUTLASS_HOST_DEVICE const_iterator cbegin() const { return const_iterator(storage); } CUTLASS_HOST_DEVICE iterator end() { return iterator(storage + kStorageElements); } CUTLASS_HOST_DEVICE const_iterator cend() const { return const_iterator(storage + 
kStorageElements); } CUTLASS_HOST_DEVICE reverse_iterator rbegin() { return reverse_iterator(storage + kStorageElements); } CUTLASS_HOST_DEVICE const_reverse_iterator crbegin() const { return const_reverse_iterator(storage + kStorageElements); } CUTLASS_HOST_DEVICE reverse_iterator rend() { return reverse_iterator(storage); } CUTLASS_HOST_DEVICE const_reverse_iterator crend() const { return const_reverse_iterator(storage); } private: /// Internal storage Storage storage[kStorageElements]; }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/array_subbyte.h/0
{ "file_path": "cutlass/include/cutlass/array_subbyte.h", "repo_id": "cutlass", "token_count": 5033 }
23
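A short host-side sketch of how the sub-byte specialization above is used (illustrative, not part of the file; values are arbitrary). Indexing returns the reference proxy, which performs the masked read-modify-write into the packed Storage word described above.

#include "cutlass/array.h"
#include "cutlass/numeric_types.h"

int main() {
  // 8 x 4-bit signed integers packed into a single 32-bit Storage word
  cutlass::Array<cutlass::int4b_t, 8> frag;
  frag.clear();

  frag[3] = cutlass::int4b_t(-2);   // proxy masks in the 4-bit pattern at index 3
  frag[7] = cutlass::int4b_t(5);

  int a = int(frag[3]);             // extraction also goes through the proxy
  int b = int(frag[7]);
  return (a == -2 && b == 5) ? 0 : 1;
}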
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This file contains definitions and utility functions for describing convolution problem sizes. Conv3dProblem description: activation (NDHWC), filter (KTRSC), output (NZPQK), padding (pad_d, pad_h, pad_w), stride (stride_d, stride_h, stride_w), dilation (dilation_d, dilation_h, dilation_w).
Free functions to map: Map tensor extents (Conv3d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_extent(ConvolutionOperator) Map tensor sizes (Conv3d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_size(ConvolutionOperator) Map tensor problem sizes (Conv3d -> ImplicitGemm): implicit_gemm_problem_size(ConvolutionOperator) */ #pragma once #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" namespace cutlass { namespace conv { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Problem size structure struct Conv3dProblemSize : public Conv2dProblemSize { // // Type definitions // // 3D coordinate for padding, stride, and dilation in (d, h, w) dimensions using Coord3D = Coord<3>; // // Data members // // Conv3d strictly problem size parameters int D, T, Z; // input depth, filter depth, output depth int pad_d; // padding in depth dimension int stride_d; // stride in depth dimension int dilation_d; // dilation in depth dimension // // Methods // public: CUTLASS_HOST_DEVICE Conv3dProblemSize(): Conv2dProblemSize(), D(0), T(0), Z(0), pad_d(0), stride_d(1), dilation_d(1) { } /// Constructor for default padding, stride, dilation, and split-K CUTLASS_HOST_DEVICE Conv3dProblemSize( int N, int D, int H, int W, int C, int Z, int P, int Q, int K, int T, int R, int S, Mode mode ): Conv2dProblemSize(N, H, W, C, P, Q, K, R, S, mode), D(D), T(T), Z(Z), pad_d(T / 2), stride_d(1), dilation_d(1) { } /// Constructor CUTLASS_HOST_DEVICE Conv3dProblemSize( int N, int D, int H, int W, int C, int K, int T, int R, int S, int Z, int P, int Q, int pad_d, int pad_h, int pad_w, int stride_d, int stride_h, int stride_w, int dilation_d, int dilation_h, int dilation_w, Mode mode, int split_k_slices = 1, int groups = 1 ): Conv2dProblemSize( N, H, W, C, K, R, S, P, Q, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, mode, split_k_slices, groups), D(D), T(T), Z(Z), pad_d(pad_d), stride_d(stride_d), dilation_d(dilation_d) { } /// Constructs convolution problem size from cutlass Tensor5DCoord and Coord3D // set *user-defined* output size and sets Z, P, and Q (include all data members in ctor) CUTLASS_HOST_DEVICE Conv3dProblemSize( cutlass::Tensor5DCoord input_size, // NDHWC cutlass::Tensor5DCoord filter_size, // KTRSC Coord3D padding, // pad_d, pad_h, pad_w Coord3D stride, // stride_d, stride_h, stride_w Coord3D dilation, // dilation_d, dilation_h, dilation_w cutlass::Tensor5DCoord output_size, // NZPQK cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): Conv2dProblemSize( {input_size.n(), input_size.h(), input_size.w(), input_size.c()}, {filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()}, {padding[1], padding[1], padding[2], padding[2]}, {stride[1], stride[2]}, {dilation[1], dilation[2]}, {output_size.n(), output_size.h(), output_size.w(), output_size.c()}, mode, split_k_slices, groups), D(input_size.d()), T(filter_size.d()), Z(output_size.d()), pad_d(padding[0]), stride_d(stride[0]), dilation_d(dilation[0]) { } /// Constructs convolution problem size from cutlass Tensor5DCoord and Coord3D // *computes* output size and sets Z, P and Q (include all data members in ctor) CUTLASS_HOST_DEVICE Conv3dProblemSize( cutlass::Tensor5DCoord input_size, // NDHWC cutlass::Tensor5DCoord filter_size, // KTRSC Coord3D padding, // pad_d, pad_h, pad_w Coord3D stride, // stride_d, stride_h, stride_w Coord3D dilation, // dilation_d, dilation_h, dilation_w cutlass::conv::Mode mode = 
cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): Conv2dProblemSize( {input_size.n(), input_size.h(), input_size.w(), input_size.c()}, {filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()}, {padding[1], padding[1], padding[2], padding[2]}, {stride[1], stride[2]}, {dilation[1], dilation[2]}, mode, split_k_slices, groups), D(input_size.d()), T(filter_size.d()), pad_d(padding[0]), stride_d(stride[0]), dilation_d(dilation[0]) { // set output Z Z = ((D + pad_d * 2 - T * dilation_d) / stride_d) + 1; } /// Constructs convolution problem size from cutlass Tensor5DCoord, Coord3D // *computes* output size and sets Z, P and Q (include all data members in ctor) CUTLASS_HOST_DEVICE Conv3dProblemSize( cutlass::Tensor5DCoord input_size, // NDHWC cutlass::Tensor5DCoord filter_size, // KTRSC CUTLASS_STL_NAMESPACE::tuple<Coord3D, Coord3D> padding, // Coord3D {pad_d, pad_h, pad_w} & Coord3D {far pad_d, pad_h, pad_w} to calculate o/p/q Coord3D stride, // stride_d, stride_h, stride_w Coord3D dilation, // dilation_d, dilation_h, dilation_w cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): Conv2dProblemSize( {input_size.n(), input_size.h(), input_size.w(), input_size.c()}, {filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()}, {CUTLASS_STL_NAMESPACE::get<0>(padding)[1], CUTLASS_STL_NAMESPACE::get<1>(padding)[1], CUTLASS_STL_NAMESPACE::get<0>(padding)[2], CUTLASS_STL_NAMESPACE::get<1>(padding)[2]}, {stride[1], stride[2]}, {dilation[1], dilation[2]}, mode, split_k_slices, groups), D(input_size.d()), T(filter_size.d()), pad_d(CUTLASS_STL_NAMESPACE::get<0>(padding)[0]), stride_d(stride[0]), dilation_d(dilation[0]) { // set output Z Z = ((D + pad_d + CUTLASS_STL_NAMESPACE::get<1>(padding)[0] - T * dilation_d) / stride_d) + 1; } /// Equality operator (ignores mode and split_k_slice) CUTLASS_HOST_DEVICE bool operator==(Conv3dProblemSize const &conv) const { return ( (N == conv.N) && (D == conv.D) && (H == conv.H) && (W == conv.W) && (C == conv.C) && (K == conv.K) && (T == conv.T) && (R == conv.R) && (S == conv.S) && (Z == conv.Z) &&(P == conv.P) && (Q == conv.Q) && (pad_d == conv.pad_d) && (pad_h == conv.pad_h) && (pad_w == conv.pad_w) && (stride_d == conv.stride_d) && (stride_h == conv.stride_h) && (stride_w == conv.stride_w) && (dilation_d == conv.dilation_d) && (dilation_h == conv.dilation_h) && (dilation_w == conv.dilation_w) ); } /// Inequality operator CUTLASS_HOST_DEVICE bool operator!=(Conv3dProblemSize const &rhs) const { return !(*this == rhs); } // Reset covolution mode in the problem CUTLASS_HOST_DEVICE Conv3dProblemSize reset_mode(cutlass::conv::Mode mode_) { Conv3dProblemSize tmp(*this); tmp.mode = mode_; return tmp; } // Reset covolution mode in the problem CUTLASS_HOST_DEVICE Conv3dProblemSize reset_split_k_slices(int split_k_slices_) { Conv3dProblemSize tmp(*this); tmp.split_k_slices = split_k_slices_; return tmp; } /// Returns activation extent as Tensor5DCoord CUTLASS_HOST_DEVICE cutlass::Tensor5DCoord activation_extent() const { return cutlass::Tensor5DCoord ({N, D, H, W, C}); } /// Returns filter extent as Tensor5DCoord CUTLASS_HOST_DEVICE cutlass::Tensor5DCoord filter_extent(bool is_deconv = false) const { return is_deconv ? 
cutlass::Tensor5DCoord ({C, T, R, S, K}) : cutlass::Tensor5DCoord ({K, T, R, S, C}); } /// Returns output extent as Tensor5DCoord CUTLASS_HOST_DEVICE cutlass::Tensor5DCoord output_extent() const { return cutlass::Tensor5DCoord ({N, Z, P, Q, K}); } /// Returns activation size in number of elements CUTLASS_HOST_DEVICE int64_t activation_size() const { return (N * D * H * W * C); } /// Returns filter size in number of elements CUTLASS_HOST_DEVICE int64_t filter_size() const { return (K * T * R * S * C); } /// Returns output size in number of elements CUTLASS_HOST_DEVICE int64_t output_size() const { return (N * Z * P * Q * K); } /// Returns padding as Coord3D CUTLASS_HOST_DEVICE Coord3D padding() const { return Coord3D ({pad_d, pad_h, pad_w}); } /// Returns stride as MatrixCoord CUTLASS_HOST_DEVICE Coord3D stride() const { return Coord3D ({stride_d, stride_h, stride_w}); } /// Returns dilation as MatrixCoord CUTLASS_HOST_DEVICE Coord3D dilation() const { return Coord3D ({dilation_d, dilation_h, dilation_w}); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // ImplicitGemm helper functions // //////////////////////////////////////////////////////////////////////////////////////////////////// /// Determine the problem size of the implicit GEMM operation CUTLASS_HOST_DEVICE cutlass::gemm::GemmCoord implicit_gemm_problem_size( Operator conv_operator, Conv3dProblemSize const &problem_size) { // Compute problem size switch (conv_operator) { case Operator::kFprop: return gemm::GemmCoord( problem_size.N * problem_size.Z * problem_size.P * problem_size.Q, problem_size.K, problem_size.T * problem_size.R * problem_size.S * problem_size.C ); case Operator::kDeconv: case Operator::kDgrad: return gemm::GemmCoord( problem_size.N * problem_size.D * problem_size.H * problem_size.W, problem_size.C, problem_size.T * problem_size.R * problem_size.S * problem_size.K ); case Operator::kWgrad: return gemm::GemmCoord( problem_size.K, problem_size.T * problem_size.R * problem_size.S * problem_size.C, problem_size.N * problem_size.Z * problem_size.P * problem_size.Q ); default: break; } return gemm::GemmCoord(); } // Determine the number of gemm_k iterations for conv2d problem using implicit gemm algorithm CUTLASS_HOST_DEVICE int implicit_gemm_k_iterations( Operator conv_operator, int threadblock_K, Conv3dProblemSize const &problem_size, IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic, GroupMode group_mode = GroupMode::kNone, int threadblock_N = 0) { int iterations = 0; int elements_per_split_k_slice = 0; if (group_mode == GroupMode::kNone) { switch (conv_operator) { case Operator::kFprop: elements_per_split_k_slice = (problem_size.C + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = problem_size.T * problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K); break; case Operator::kDeconv: case Operator::kDgrad: elements_per_split_k_slice = (problem_size.K + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = problem_size.T * problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K); break; case Operator::kWgrad: elements_per_split_k_slice = (problem_size.N * problem_size.Z * problem_size.P * problem_size.Q + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = (elements_per_split_k_slice + threadblock_K - 1) / threadblock_K; break; default: break; } } else if (group_mode == 
GroupMode::kDepthwise) { int channels_per_cta = threadblock_N; if (algorithm == IteratorAlgorithm::kAnalytic) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.T * problem_size.R * problem_size.S * ((channels_per_cta + threadblock_K - 1) / threadblock_K); break; default: break; } } } return iterations; } //////////////////////////////////////////////////////////////////////////////// // Mapping function (ImplicitGemm A, B, C -> Conv Activation, Filter, Output) //////////////////////////////////////////////////////////////////////////////// /// Returns ImplicitGemm tensor A extent as Tensor5DCoord CUTLASS_HOST_DEVICE cutlass::Tensor5DCoord implicit_gemm_tensor_a_extent( Operator conv_operator, Conv3dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.activation_extent(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.output_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.output_extent(); default : break; } return cutlass::Tensor5DCoord(); } /// Returns ImplicitGemm tensor B extent as Tensor5DCoord CUTLASS_HOST_DEVICE cutlass::Tensor5DCoord implicit_gemm_tensor_b_extent( Operator conv_operator, Conv3dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.filter_extent(); case cutlass::conv::Operator::kDeconv: return problem_size.filter_extent(true); case cutlass::conv::Operator::kDgrad: return problem_size.filter_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.activation_extent(); default : break; } return cutlass::Tensor5DCoord(); } /// Returns ImplicitGemm tensor C extent as Tensor5DCoord CUTLASS_HOST_DEVICE cutlass::Tensor5DCoord implicit_gemm_tensor_c_extent( Operator conv_operator, Conv3dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.output_extent(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.activation_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.filter_extent(); default : break; } return cutlass::Tensor5DCoord(); } /// Returns ImplicitGemm tensor A size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_a_size( Operator conv_operator, Conv3dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.activation_size(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.output_size(); case cutlass::conv::Operator::kWgrad: return problem_size.output_size(); default : break; } return 0; } /// Returns ImplicitGemm tensor B size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_b_size( Operator conv_operator, Conv3dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.filter_size(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.filter_size(); case cutlass::conv::Operator::kWgrad: return problem_size.activation_size(); default : break; } return 0; } /// Returns ImplicitGemm tensor C size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_c_size( Operator conv_operator, Conv3dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.output_size(); case 
cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.activation_size(); case cutlass::conv::Operator::kWgrad: return problem_size.filter_size(); default : break; } return 0; } } // namespace conv } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/conv3d_problem_size.h/0
{ "file_path": "cutlass/include/cutlass/conv/conv3d_problem_size.h", "repo_id": "cutlass", "token_count": 7140 }
24
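To make the mapping functions above concrete, here is a minimal host-side sketch (illustrative only; the problem dimensions are arbitrary). It builds a Conv3dProblemSize with the output-computing constructor and prints the implicit GEMM extents, which for Fprop are M = N*Z*P*Q, N = K, K = T*R*S*C.

#include <cstdio>
#include "cutlass/conv/conv3d_problem_size.h"

int main() {
  cutlass::conv::Conv3dProblemSize problem(
      cutlass::Tensor5DCoord(1, 8, 16, 16, 32),   // activation NDHWC
      cutlass::Tensor5DCoord(64, 3, 3, 3, 32),    // filter KTRSC
      cutlass::make_Coord(1, 1, 1),               // padding  (pad_d, pad_h, pad_w)
      cutlass::make_Coord(1, 1, 1),               // stride   (stride_d, stride_h, stride_w)
      cutlass::make_Coord(1, 1, 1),               // dilation (dilation_d, dilation_h, dilation_w)
      cutlass::conv::Mode::kCrossCorrelation);    // output NZPQK is computed by this constructor

  cutlass::gemm::GemmCoord gemm = cutlass::conv::implicit_gemm_problem_size(
      cutlass::conv::Operator::kFprop, problem);

  // For these sizes we expect M = 1*8*16*16, N = 64, K = 3*3*3*32.
  printf("implicit GEMM: M=%d N=%d K=%d\n", gemm.m(), gemm.n(), gemm.k());
  return 0;
}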
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::GroupMode GroupMode, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kUnity, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultConv2dGroupFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline that supports all GroupMode. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::GroupMode GroupMode, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dGroupFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, GroupMode, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { static_assert(platform::is_same<LayoutA, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(platform::is_same<LayoutB, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA, GroupMode >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB, GroupMode >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, GroupMode >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop specialization for Analytic IteratorAlgorithm and /// 2 stage pipeline that supports all GroupMode. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::GroupMode GroupMode, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dGroupFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, GroupMode, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { static_assert(platform::is_same<LayoutA, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(platform::is_same<LayoutB, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA, GroupMode > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB, GroupMode > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, GroupMode >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop specialization for Optimized IteratorAlgorithm and multistage /// pipeline that supports GroupMode::kSingleGroup. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dGroupFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, GroupMode::kSingleGroup, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { static_assert(platform::is_same<LayoutA, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(platform::is_same<LayoutB, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, GroupMode::kSingleGroup >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop specialization for Optimized IteratorAlgorithm and /// 2 stage pipeline that supports GroupMode::kSingleGroup. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dGroupFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, GroupMode::kSingleGroup, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { static_assert(platform::is_same<LayoutA, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(platform::is_same<LayoutB, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, GroupMode::kSingleGroup >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/kernel/default_conv2d_group_fprop.h/0
{ "file_path": "cutlass/include/cutlass/conv/kernel/default_conv2d_group_fprop.h", "repo_id": "cutlass", "token_count": 6519 }
25
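As a rough illustration of how the DefaultConv2dGroupFprop template above is typically consumed, the sketch below names one possible instantiation for an SM80 tensor-op group convolution. All concrete choices (element types, tile shapes, epilogue, swizzle, stage count) are assumptions made for illustration and are not taken from the file itself; they follow the pattern of other CUTLASS convolution examples.

#include "cutlass/conv/kernel/default_conv2d_group_fprop.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

// Illustrative instantiation: FP16 inputs, FP32 accumulation, single-group mode,
// optimized iterator algorithm, 3-stage multistage pipeline on SM80.
using GroupFpropKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
    cutlass::half_t, cutlass::layout::TensorNHWC,           // ElementA, LayoutA
    cutlass::half_t, cutlass::layout::TensorNHWC,           // ElementB, LayoutB
    float,           cutlass::layout::TensorNHWC,           // ElementC, LayoutC
    float,                                                   // ElementAccumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,                  // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,                    // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,                     // InstructionShape
    cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                                       // Stages
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::GroupMode::kSingleGroup,
    cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;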
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined Implicit GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/semaphore.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/epilogue/threadblock/output_iterator_parameter.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ = Conv2dProblemSize ///! 
Convolutional operator on 2D or 3D problem > struct ImplicitGemmConvolutionStridedDgrad { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static Operator const kConvolutionalOperator = ConvOperator; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename EpilogueOutputOp::ElementOutput; /// Set output tensor C layout using LayoutC = LayoutA; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using WarpMmaOperator = typename Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename ArchMmaOperator::Operator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename WarpMmaOperator::Shape; using InstructionShape = typename ArchMmaOperator::Shape; static int const kStages = Mma::kStages; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using TensorRefA = typename Mma::IteratorA::TensorRef; using TensorRefB = typename Mma::IteratorB::TensorRef; using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>; /// Check iterator A and B convolution dimension are the same and // set device::ImplicitGemmConvolution::kConvDim static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim, "Convolution on different different dimensions is not supported"); static int const kConvDim = Mma::IteratorA::kConvDim; /// Conv dimension and problem size structure (Conv2d or Conv3d) using ConvProblemSize = ConvProblemSize_; static conv::GroupMode const kGroupMode = conv::GroupMode::kNone; /// Wgrad C stride idx for implicit gemm algorithm // Conv2d row-major matrix C (KxRSC) // Conv3d row-major matrix C (KxTRSC) static int const kWgradCStrideIdx = platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3; /// This chooses the appropriate stride element of the C tensor. static int const kTensorCStrideIdx = (kConvolutionalOperator == conv::Operator::kWgrad ? 
kWgradCStrideIdx : 0); // Strided dgrad uses a specialized threadblock swizzle for functionality and performance static_assert((platform::is_same<ThreadblockSwizzle, threadblock::StridedDgradHorizontalThreadblockSwizzle>::value) || (platform::is_same<ThreadblockSwizzle, threadblock::StridedDgradIdentityThreadblockSwizzle<1>>::value) || (platform::is_same<ThreadblockSwizzle, threadblock::StridedDgradIdentityThreadblockSwizzle<4>>::value) || (platform::is_same<ThreadblockSwizzle, threadblock::StridedDgradIdentityThreadblockSwizzle<8>>::value), "Needs ThreadblockSwizzle type specialized for strided dgrad"); // // // using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter< LayoutC, typename Epilogue::OutputTileIterator::Layout, TensorRefC, ConvOperator, ConvProblemSize >; /// Argument structure struct Arguments { // // Data members // ConvProblemSize problem_size{}; TensorRefA ref_A{}; TensorRefB ref_B{}; TensorRefC ref_C{}; TensorRefC ref_D{}; typename EpilogueOutputOp::Params output_op{}; SplitKMode split_k_mode{}; // // Methods // /// Default ctor Arguments() = default; CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size, TensorRefA const & ref_A, TensorRefB const & ref_B, TensorRefC const & ref_C, TensorRefC const & ref_D, typename EpilogueOutputOp::Params const & output_op, SplitKMode const & split_k_mode = SplitKMode::kSerial ): problem_size(problem_size), ref_A(ref_A), ref_B(ref_B), ref_C(ref_C), ref_D(ref_D), output_op(output_op), split_k_mode(split_k_mode) { } }; /// Parameters structure struct Params { ConvProblemSize problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int swizzle_log_tile{0}; FastDivmod stride_h_divmod{}; FastDivmod stride_w_divmod{}; int gemm_k_iterations{0}; typename Mma::IteratorA::Params iterator_A{}; typename Mma::IteratorA::Element const *ptr_A = nullptr; typename Mma::IteratorB::Params iterator_B{}; typename Mma::IteratorB::Element const *ptr_B = nullptr; typename Epilogue::OutputTileIterator::Params iterator_C{}; typename Epilogue::OutputTileIterator::Element *ptr_C = nullptr; typename Epilogue::OutputTileIterator::Params iterator_D{}; typename Epilogue::OutputTileIterator::Element *ptr_D = nullptr; typename EpilogueOutputOp::Params output_op {}; int *semaphore = nullptr; SplitKMode split_k_mode {}; // // Methods // Params() = default; /// CUTLASS_HOST_DEVICE Params( Arguments const &args, int *semaphore = nullptr ): problem_size(args.problem_size), stride_h_divmod(args.problem_size.stride_h), stride_w_divmod(args.problem_size.stride_w), iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())), ptr_A(args.ref_A.data()), iterator_B(args.problem_size, args.ref_B.layout()), ptr_B(args.ref_B.data()), iterator_C(ConvOutputIteratorParameter::layout(args.ref_C), args.problem_size, ThreadblockShape::kM), ptr_C(args.ref_C.data()), iterator_D(ConvOutputIteratorParameter::layout(args.ref_D), args.problem_size, ThreadblockShape::kM), ptr_D(args.ref_D.data()), output_op(args.output_op), semaphore(semaphore), split_k_mode(args.split_k_mode) { gemm_k_iterations = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size); ThreadblockSwizzle threadblock_swizzle; grid_tiled_shape = threadblock_swizzle.get_tiled_shape( kConvolutionalOperator, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); 
swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE ImplicitGemmConvolutionStridedDgrad() { } /// Executes one ImplicitGEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() || params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) { return; } // Compute position within threadblock int thread_idx = threadIdx.x; // Compute starting filter position for strided dgrad int tile_m_per_filter = strided_dgrad_tile_m_per_filter(params.problem_size, ThreadblockShape::kM); int filter_tile_m = (threadblock_tile_idx.m() / tile_m_per_filter); // The subsequent fast_divmod() operations are equivalent to the following logical computation: // // int start_r = filter_tile_m / (params.problem_size.stride_w); // int start_s = filter_tile_m % (params.problem_size.stride_w); int start_r, start_s; params.stride_w_divmod(start_r, start_s, filter_tile_m); int filter_r = start_r; int filter_s = start_s; if (params.problem_size.mode == Mode::kConvolution) { filter_r = (params.problem_size.R - 1 - filter_r); filter_s = (params.problem_size.S - 1 - filter_s); } // Starting h, w positions for filter position in gemm_k=0 int start_h, start_w; strided_dgrad_starting_coords( params.problem_size, params.stride_h_divmod, params.stride_w_divmod, filter_r, filter_s, start_h, start_w); if (start_h >= params.problem_size.H || start_w >= params.problem_size.W) { return; } typename Mma::FragmentC accumulators; accumulators.clear(); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // Check if CTA contributes valid MMA (Dy * w) and accumulator will be non-zero after MMA if (start_r < params.problem_size.R && start_s < params.problem_size.S) { // Scale gemm_k_iterations for strided dgrad int gemm_k_iterations = (params.gemm_k_iterations / (params.problem_size.R * params.problem_size.S) ) * params.problem_size.num_gemm_k_filter_positions(start_r, start_s); // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.iterator_A, params.problem_size, params.ptr_A, thread_idx, params.stride_h_divmod, params.stride_w_divmod, start_r, start_s, MatrixCoord( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.k() * Mma::Shape::kK ) ); typename Mma::IteratorB iterator_B( params.iterator_B, params.problem_size, params.ptr_B, thread_idx, start_r, start_s, MatrixCoord( threadblock_tile_idx.k() * Mma::Shape::kK, threadblock_tile_idx.n() * Mma::Shape::kN ) ); // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); // Compute threadblock-scoped matrix multiply-add mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); } // // Epilogue // EpilogueOutputOp output_op(params.output_op); // Construct the semaphore. 
int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m(); Semaphore semaphore(params.semaphore + block_idx, thread_idx); // Compute logical position within grid threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // If performing a reduction via split-K, fetch the initial synchronization if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k()); } MatrixCoord threadblock_offset( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.n() * Mma::Shape::kN ); // Tile iterator writing to destination tensor typename Epilogue::OutputTileIterator iterator_D( params.iterator_D, params.ptr_D, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, params.stride_h_divmod, params.stride_w_divmod, start_r, start_s, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); if (output_op.is_source_needed()) { // Tile iterator reading from source accumulator tensor typename Epilogue::OutputTileIterator iterator_C( params.iterator_C, params.ptr_C, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, params.stride_h_divmod, params.stride_w_divmod, start_r, start_s, threadblock_offset); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_idx.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_idx.k()); } // Run epilogue with addend source iterator epilogue(output_op, iterator_D, accumulators, iterator_C); } else { // Run epilogue without addend source iterator epilogue(output_op, iterator_D, accumulators); } // // Release the semaphore // if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_idx.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h/0
{ "file_path": "cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h", "repo_id": "cutlass", "token_count": 6556 }
26
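The Arguments struct defined above is normally populated on the host before launching the kernel. A hedged sketch follows, assuming StridedDgradKernel is some concrete instantiation of ImplicitGemmConvolutionStridedDgrad whose epilogue output op is a plain linear combination; problem_size, the tensor references, alpha, and beta are presumed to exist already and are not defined here.

// dgrad: A is the output gradient (dy), B is the filter (w), C/D is the input gradient (dx).
// Note the static_assert above: ThreadblockSwizzle must be one of the strided-dgrad swizzles.
StridedDgradKernel::Arguments args(
    problem_size,                          // cutlass::conv::Conv2dProblemSize
    ref_dy,                                // TensorRefA
    ref_w,                                 // TensorRefB
    ref_dx,                                // TensorRefC (source operand C)
    ref_dx,                                // TensorRefC (destination D)
    {alpha, beta},                         // EpilogueOutputOp::Params (assumed LinearCombination)
    cutlass::conv::SplitKMode::kSerial);   // serial split-K reduction

// The device-side launch path then builds Params from these Arguments, e.g.
// StridedDgradKernel::Params params(args, workspace_semaphore_ptr);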
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile) matrix from memory. This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/threadblock/conv2d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename Layout_, typename ThreadMap_, typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>, bool IsDeconv_ = false > class Conv2dFpropFilterTileAccessIteratorOptimized{ public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = Layout_; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static bool const IsDeconv = IsDeconv_; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 2; using ConvProblemSize = typename conv::Conv2dProblemSize; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); // // Simplifying assertions // static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1"); // // Parameters structure // struct Params : Conv2dFpropFilterIteratorOptimizedParams<Layout> { CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Conv2dFpropFilterIteratorOptimizedParams<Layout> const &base): Conv2dFpropFilterIteratorOptimizedParams<Layout>(base) { } CUTLASS_HOST_DEVICE Params( Conv2dProblemSize const &problem_size, Layout const &layout ): Conv2dFpropFilterIteratorOptimizedParams<Layout>( problem_size, layout, sizeof_bits<Element>::value, {Shape::kRow, Shape::kColumn}, ThreadMap::kThreads, ThreadMap::kElementsPerAccess, {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided} ) { } }; private: Conv2dFpropFilterIteratorOptimizedParams<Layout> const &params_; Conv2dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; LongIndex iteration_vector_; char const *pointer_; uint32_t predicates_[kAccessesPerVector]; int filter_rs_; int filter_c_; int channels_per_group_; // // Assertions // // We map predicates into bits packed in this uint32_t container static_assert(ThreadMap::Iterations::kStrided < sizeof(predicates_) * 8, "Currently, the number of loads per iteration is limited by the size of the predicates container."); public: CUTLASS_HOST_DEVICE Conv2dFpropFilterTileAccessIteratorOptimized( Conv2dFpropFilterIteratorOptimizedParams<Layout> const &params, Conv2dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), 
problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), predicates_{0}, filter_rs_(0), filter_c_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_c_ = threadblock_offset.row() + thread_coord.contiguous(); Index column = threadblock_offset.column() + thread_coord.strided(); channels_per_group_ = (IsDeconv ? problem_size_.K : problem_size_.C) / problem_size_.groups; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { uint32_t pred = ((column + s * ThreadMap::Delta::kStrided < (IsDeconv ? problem_size_.C : problem_size_.K)) ? 1u : 0); CUTLASS_PRAGMA_UNROLL for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) { predicates_[v_idx] |= (pred << s); } } CUTLASS_PRAGMA_UNROLL for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) { clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= channels_per_group_); } pointer_ += ( params_.layout({filter_c_, column}) ) * sizeof_bits<Element>::value / 8; set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { LongIndex next = params_.inc_next_rs; // moves to the next tile ++filter_rs_; if (filter_rs_ == params_.RS) { filter_rs_ = 0; next = params_.inc_next_c; filter_c_ += params_.filter_c_delta; } CUTLASS_PRAGMA_UNROLL for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) { clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= channels_per_group_); } pointer_ += next; } /// Clears the predicates CUTLASS_HOST_DEVICE void clear_mask(int v, bool clear = true) { predicates_[v] = clear ? 0u : predicates_[v]; } /// Returns true if the current coordinate is within the filter tensor W CUTLASS_HOST_DEVICE bool valid() { return (predicates_[iteration_vector_] & (1u << iteration_strided_)); } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { return reinterpret_cast<AccessType const *>(pointer_) + iteration_vector_; } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv2dFpropFilterTileAccessIteratorOptimized &operator++() { ++iteration_vector_; if (iteration_vector_ < kAccessesPerVector) { return *this; } iteration_vector_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { // Move to the next K coordinate within the tile pointer_ += params_.inc_next_k; return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(Conv2dProblemSize const &problem_size) { auto input_channels = (IsDeconv ? problem_size.K : problem_size.C); auto output_channels = (IsDeconv ? 
problem_size.C : problem_size.K); // check alignment constraint on iterator's contiguous dimension if ((input_channels / problem_size.groups) % AccessType::kElements) { return Status::kErrorInvalidProblem; } if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) { if (output_channels % 32) { return Status::kErrorInvalidProblem; } } if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) { if (output_channels % 64) { return Status::kErrorInvalidProblem; } } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h", "repo_id": "cutlass", "token_count": 3640 }
27
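A small host-side sketch of the feasibility check exposed by the iterator above: before committing to the optimized filter iterator, can_implement() verifies that the per-group channel count satisfies the vector-access alignment and that interleaved layouts have compatible output channel counts. FilterIteratorB stands for some concrete instantiation of Conv2dFpropFilterTileAccessIteratorOptimized and is an assumption here.

// Reject problems whose (C / groups) is not a multiple of the access width,
// mirroring the alignment rule inside can_implement() above.
cutlass::Status status = FilterIteratorB::can_implement(problem_size);
if (status != cutlass::Status::kSuccess) {
  // Fall back to the analytic iterator or choose a smaller AlignmentB for this problem.
}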
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM B (activation tile) matrix from memory. This iterator assumes TensorNDHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv3d_problem_size.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_ > class Conv3dWgradActivationTileAccessIteratorAnalytic { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNDHWC; using ThreadMap = ThreadMap_; using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 3; using ConvProblemSize = typename conv::Conv3dProblemSize; static int const kAccessesPerVector = 1; static_assert(sizeof_bits<Element>::value >= 8, "WGRAD requires elements of size 8b or greater."); // // Parameters structure // struct Params { Layout layout; // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params( Conv3dProblemSize const &problem_size, Layout const &layout ): layout(layout) { } }; private: Params const &params_; Conv3dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; char const *pointer_; // Filter postion (t,r,s,c) in contiguous dimension stays constant for each gemm_iteration_k int filter_t_[ThreadMap::Iterations::kContiguous]; int filter_r_[ThreadMap::Iterations::kContiguous]; int filter_s_[ThreadMap::Iterations::kContiguous]; int filter_c_[ThreadMap::Iterations::kContiguous]; int offset_nzpq_[ThreadMap::Iterations::kStrided]; public: CUTLASS_HOST_DEVICE Conv3dWgradActivationTileAccessIteratorAnalytic( Params const &params, Conv3dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); // initialize t,r,s,c filter position for every contiguous iteration CUTLASS_PRAGMA_UNROLL for(int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int trsc_offset = threadblock_offset.column() + thread_coord.contiguous() + c * ThreadMap::Delta::kContiguous; filter_t_[c] = trsc_offset / (problem_size_.R * problem_size_.S * problem_size_.C); int residual = trsc_offset % (problem_size_.R * problem_size_.S * problem_size_.C); filter_r_[c] = residual / (problem_size_.S * problem_size_.C); residual = residual % (problem_size_.S * problem_size_.C); filter_s_[c] = residual / problem_size_.C; filter_c_[c] = residual % problem_size_.C; } // initialize n, z, p, q offset for every strided iteration CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { offset_nzpq_[s] = threadblock_offset.row() + thread_coord.strided() 
+ s * ThreadMap::Delta::kStrided; } } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { // moves to the next GEMM-K offset (offset_nzpq_) in GEMM-B by a CTA-K tile CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { offset_nzpq_[s] += Shape::kRow * problem_size_.split_k_slices; } } /// Returns the coordinate in the activation tensor x that is currently pointed to /// by the iterator. CUTLASS_HOST_DEVICE TensorCoord at() const { int t = filter_t_[iteration_contiguous_]; int r = filter_r_[iteration_contiguous_]; int s = filter_s_[iteration_contiguous_]; if (problem_size_.mode == Mode::kConvolution) { t = (problem_size_.T - 1 - t); r = (problem_size_.R - 1 - r); s = (problem_size_.S - 1 - s); } int n = offset_nzpq_[iteration_strided_] / (problem_size_.Z * problem_size_.P * problem_size_.Q); int residual = offset_nzpq_[iteration_strided_] % (problem_size_.Z * problem_size_.P * problem_size_.Q); int z = residual / (problem_size_.P * problem_size_.Q); residual = residual % (problem_size_.P * problem_size_.Q); int p = residual / problem_size_.Q; int q = residual % problem_size_.Q; int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d; int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h; int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w; return TensorCoord(n, d, h, w, filter_c_[iteration_contiguous_]); } /// Returns true if the current coordinate is within the activation tensor x CUTLASS_HOST_DEVICE bool valid() const { TensorCoord coord = at(); return coord.n() < problem_size_.N && coord.d() >= 0 && coord.d() < problem_size_.D && coord.h() >= 0 && coord.h() < problem_size_.H && coord.w() >= 0 && coord.w() < problem_size_.W && coord.c() < problem_size_.C; } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_DEVICE AccessType const *get() const { TensorCoord coord = at(); LongIndex offset = params_.layout(coord); return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv3dWgradActivationTileAccessIteratorAnalytic &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(Conv3dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.C % (128/sizeof_bits<Element>::value)) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_analytic.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_analytic.h", "repo_id": "cutlass", "token_count": 3334 }
28
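The coordinate arithmetic in at() above can be read in isolation; the sketch below restates it as a standalone host helper for a Conv3dProblemSize, assuming cross-correlation mode (the kConvolution filter-flip branch is omitted). The helper name and return struct are illustrative only.

#include "cutlass/conv/conv3d_problem_size.h"

struct WgradActivationCoord { int n, d, h, w, c; };

// Decompose a linear gemm-K offset (n, z, p, q packed together) plus a filter
// position (t, r, s, c) into an NDHWC activation coordinate, as the iterator does.
inline WgradActivationCoord wgrad_activation_coord(
    cutlass::conv::Conv3dProblemSize const &ps,
    int offset_nzpq, int t, int r, int s, int c) {

  int n        = offset_nzpq / (ps.Z * ps.P * ps.Q);
  int residual = offset_nzpq % (ps.Z * ps.P * ps.Q);
  int z        = residual / (ps.P * ps.Q);
  residual     = residual % (ps.P * ps.Q);
  int p        = residual / ps.Q;
  int q        = residual % ps.Q;

  int d = z * ps.stride_d - ps.pad_d + t * ps.dilation_d;
  int h = p * ps.stride_h - ps.pad_h + r * ps.dilation_h;
  int w = q * ps.stride_w - ps.pad_w + s * ps.dilation_w;

  return {n, d, h, w, c};
}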
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates calculating the address and predicates to the load of scale and bias vectors. This iterator uses masks to guard out-of-bounds accesses. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. */ #pragma once #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/conv/threadblock/conv2d_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedScaleBiasVectorAccessIterator /// template <typename ThreadblockShape, typename Element, typename Layout> class PredicatedScaleBiasVectorAccessIterator; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for fprop pitch-linear data. 
/// template <typename ThreadblockShape_, typename Element_> class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_, Element_, layout::PitchLinear> { public: using ThreadblockShape = ThreadblockShape_; using Element = Element_; using Layout = layout::PitchLinear; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ConstPointer = const Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value; static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess; using AccessType = AlignedArray<Element, kElementsPerAccess>; using Params = PredicatedScaleBiasVectorAccessIteratorParams; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; private: // // Data members // /// Parameters object with precomputed internal state Params const &params_; /// Internal pointer to first access of tile BytePointer pointer_; int problem_size_trs; int problem_size_c; int filter_trs_; TensorCoord thread_offset_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( /// Precomputed parameters object Params const &params, /// Extent of tensor Conv2dProblemSize const &problem_size, /// Pointer to the start of the scale vector ConstPointer scale_pointer, /// Pointer to the start of the bias vector ConstPointer bias_pointer, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) : params_(params), problem_size_trs(problem_size.R * problem_size.S), problem_size_c(problem_size.C), filter_trs_(0) { pointer_ = (thread_id < kThreads) ? reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(scale_pointer)) : reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(bias_pointer)); // Per-thread offset in logical coordinates of tensor int thread_base = (thread_id < kThreads) ? 0 : kThreads; thread_offset_ = threadblock_offset + TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0); set_iteration_index(0); } CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( /// Precomputed parameters object Params const &params, /// Extent of tensor Conv3dProblemSize const &problem_size, /// Pointer to the start of the scale vector ConstPointer scale_pointer, /// Pointer to the start of the bias vector ConstPointer bias_pointer, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) : params_(params), problem_size_trs(problem_size.T * problem_size.R * problem_size.S), problem_size_c(problem_size.C), filter_trs_(0) { pointer_ = (thread_id < kThreads) ? reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(scale_pointer)) : reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(bias_pointer)); // Per-thread offset in logical coordinates of tensor int thread_base = (thread_id < kThreads) ? 
0 : kThreads; thread_offset_ = threadblock_offset + TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0); set_iteration_index(0); } /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( /// Precomputed parameters object Params const &params, /// Extent of tensor Conv2dProblemSize const &problem_size, /// Pointer to start of scale vector ConstPointer scale_pointer, /// Pointer to start of scale vector ConstPointer bias_pointer, ///< ID of each participating thread int thread_id) : PredicatedScaleBiasVectorAccessIterator(params, problem_size, scale_pointer, bias_pointer, thread_id, make_Coord(0, 0)) {} CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( /// Precomputed parameters object Params const &params, /// Extent of tensor Conv3dProblemSize const &problem_size, /// Pointer to start of scale vector ConstPointer scale_pointer, /// Pointer to start of scale vector ConstPointer bias_pointer, ///< ID of each participating thread int thread_id) : PredicatedScaleBiasVectorAccessIterator(params, problem_size, scale_pointer, bias_pointer, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) {} /// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles CUTLASS_DEVICE void add_tile_offset( TensorCoord const &tile_offset) { thread_offset_ = thread_offset_ + TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>( pointer_ + (thread_offset_.contiguous() * sizeof_bits<Element>::value / 8)); } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator &operator++() { return *this; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE void advance() { // moves to the next tile ++filter_trs_; if (filter_trs_ == problem_size_trs) { filter_trs_ = 0; add_tile_offset(TensorCoord(1, 0)); } } /// Increment and return an instance to self. CUTLASS_DEVICE PredicatedScaleBiasVectorAccessIterator operator++(int) { PredicatedScaleBiasVectorAccessIterator self(*this); operator++(); return self; } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { uint32_t enabled = 0; #if defined(_MSC_VER) || (__CUDACC_VER_MAJOR__ < 11) enabled = threadIdx.x < kThreads * 2; #else asm volatile( "{\n" " .reg .u32 tid_reg;\n" " .reg .pred p;\n" " mov.u32 tid_reg, %%tid.x;\n" " setp.lt.u32 p, tid_reg, %1;\n" " selp.u32 %0, 1, 0, p;\n" "}\n" : "+r"(enabled) :"n"(kThreads * 2)); #endif return ((thread_offset_.contiguous() < problem_size_c) && enabled); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for row-major data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename ThreadblockShape_, typename Element_> class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_, Element_, layout::RowMajor> { public: using ThreadblockShape = ThreadblockShape_; using Element = Element_; using Layout = layout::RowMajor; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ConstPointer = const Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator< layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>, Element, layout::PitchLinear>; using AccessType = typename UnderlyingIterator::AccessType; static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess; using Params = PredicatedScaleBiasVectorAccessIteratorParams; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( ///< Precomputed parameters object Params const &params, ///< Extent of tensor Conv2dProblemSize const &problem_size, ///< Pointer to the start of the scale vector ConstPointer scale_pointer, ///< Pointer to the start of the bias vector ConstPointer bias_pointer, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params, problem_size, scale_pointer, bias_pointer, thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( ///< Precomputed parameters object Params const &params, ///< Extent of tensor Conv3dProblemSize const &problem_size, ///< Pointer to the start of the scale vector ConstPointer scale_pointer, ///< Pointer to the start of the bias vector ConstPointer bias_pointer, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params, problem_size, scale_pointer, bias_pointer, thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( Params const &params, ///< Precomputed parameters object Conv2dProblemSize const &problem_size, ///< Extent of tensor ConstPointer scale_pointer, ///< Pointer to the start of the scale vector ConstPointer bias_pointer, ///< Pointer to the start of the bias vector int thread_id ///< ID of each participating thread ) : PredicatedScaleBiasVectorAccessIterator(params, problem_size, scale_pointer, bias_pointer, thread_id, make_Coord(0, 0)) {} CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( Params const &params, ///< Precomputed parameters object Conv3dProblemSize const &problem_size, ///< Extent of tensor ConstPointer scale_pointer, ///< Pointer to the start of the scale vector ConstPointer bias_pointer, ///< Pointer to the start of the bias vector int thread_id ///< ID of each participating thread ) : 
PredicatedScaleBiasVectorAccessIterator(params, problem_size, scale_pointer, bias_pointer, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Advances an iterator along logical dimensions of matrix in units of whole /// threadblock tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator operator++(int) { PredicatedScaleBiasVectorAccessIterator self(*this); operator++(); return self; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE void advance() { iterator_.advance(); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
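////////////////////////////////////////////////////////////////////////////////

// Usage sketch (illustrative only): the threadblock shape, element type, and helper
// function below are hypothetical and not part of the library. The sketch shows the
// intended calling pattern of the iterator defined above: a single object serves both
// vectors, with threads [0, kThreads) reading the scale vector and threads
// [kThreads, 2*kThreads) reading the bias vector, while advance() steps through the
// filter positions of the convolution.

namespace example_usage {

using ThreadblockShape  = cutlass::MatrixShape<128, 64>;
using ScaleBiasIterator = cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator<
    ThreadblockShape, float, cutlass::layout::RowMajor>;

CUTLASS_DEVICE
void load_scale_bias_fragment(
    typename ScaleBiasIterator::Params const &params,
    cutlass::conv::Conv2dProblemSize const &problem_size,
    float const *scale_ptr,
    float const *bias_ptr,
    int thread_id) {

  // Construct with zero threadblock offset; the constructor routes the lower half of
  // participating threads to the scale pointer and the upper half to the bias pointer.
  ScaleBiasIterator iter(params, problem_size, scale_ptr, bias_ptr, thread_id);

  if (iter.valid()) {
    // 128-bit aligned vector access into the scale or bias vector
    typename ScaleBiasIterator::AccessType const frag = *iter.get();
    (void)frag;
  }

  // Step to the next filter position; once all R*S positions have been visited, the
  // iterator wraps and advances by one threadblock tile of channels.
  iter.advance();
}

} // namespace example_usage

////////////////////////////////////////////////////////////////////////////////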
// End of file: cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cute/atom/mma_traits_sm90.hpp" #include "cute/atom/mma_traits_sm90_gmma.hpp" #include "cute/atom/copy_traits_sm90.hpp" #include "cutlass/detail/dependent_false.hpp" #include "cutlass/detail/layout.hpp" #include "cutlass/gemm/collective/builders/sm90_common.inl" #include "cutlass/epilogue/dispatch_policy.hpp" #include "cutlass/epilogue/collective/collective_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_generic.h" #include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" #include "cutlass/epilogue/fusion/callbacks.hpp" #include "cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp" #if defined(__CUDACC_RTC__) #include <cuda/std/type_traits> #else #include <type_traits> #endif /////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::collective { /////////////////////////////////////////////////////////////////////////////// namespace detail { // Returns the parameterized dispatch policy for the TMA epilogue template<class TileShapeMNK, class EpilogueTileMN, class ElementC, class ElementD, class Schedule> constexpr auto sm90_get_tma_dispatch_policy() { using namespace cute; constexpr int EpiTiles = size(shape_div(take<0,2>(TileShapeMNK{}), EpilogueTileMN{})); constexpr int FragmentSize = size(EpilogueTileMN{}) / (detail::sm90_is_cooperative_v<Schedule> ? 
256 : 128); // 8b residuals load fast and consume little smem, so the perf cost of waiting on stores to finish outweighs the cost of extra allocation constexpr bool ReuseSmem = (sizeof_bits_v<ElementC> == sizeof_bits_v<ElementD>) && (sizeof_bits_v<ElementD> > 8); constexpr bool DelayTmaStore = is_void_v<ElementC>; // TMA store delay performs worse with residual loads constexpr int StagesD = cute::min(EpiTiles, 2); constexpr int StagesC = ReuseSmem ? cute::max(cute::min(EpiTiles, 4), StagesD+1) : cute::min(EpiTiles, 4); return Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmem, DelayTmaStore>{}; } // Returns the smem layout atom to be used for C or D matrix template<class GmemStrideType, class Element, class EpilogueTile_MN> constexpr auto sm90_get_epilogue_smem_swizzle_layout_atom() { using namespace cute; // ColMajor C/D (M-major) if constexpr (cutlass::gemm::detail::is_major<0>(GmemStrideType{})) { return cutlass::gemm::collective::detail::ss_smem_selector< cute::GMMA::Major::MN, Element, decltype(get<0>(EpilogueTile_MN{})), decltype(get<1>(EpilogueTile_MN{})) >(); } // RowMajor C/D (N-major) else if constexpr (cutlass::gemm::detail::is_major<1>(GmemStrideType{})) { return cutlass::gemm::collective::detail::ss_smem_selector< cute::GMMA::Major::K , Element, decltype(get<0>(EpilogueTile_MN{})), decltype(get<1>(EpilogueTile_MN{})) >(); } else { static_assert(cutlass::detail::dependent_false<GmemStrideType>, "Unsupported gmem layout."); } } // Attempts to compute a reasonable epilogue tile based on block tile shape or allows the user to provide one. template <class ElementD, class EpilogueTileType, class Schedule, class TileShape_MNK> constexpr auto sm90_compute_tile_shape_or_override() { if constexpr (cute::is_same_v<EpilogueTileType, EpilogueTileAuto>) { auto epi_tile = [&] () { if constexpr (detail::sm90_is_cooperative_v<Schedule>) { auto tile_m = cute::min(_128{}, size<0>(TileShape_MNK{})); auto tile_n = cute::min(_32{}, size<1>(TileShape_MNK{})); return make_shape(tile_m, tile_n); } else if constexpr (detail::sm90_is_warp_specialized_v<Schedule>) { constexpr int N_perf = sizeof_bits_v<ElementD> == 8 ? 
64 : 32; auto tile_m = cute::min(_64{}, size<0>(TileShape_MNK{})); auto tile_n = cute::min(Int<N_perf>{}, size<1>(TileShape_MNK{})); return make_shape(tile_m, tile_n); } else { static_assert(cutlass::detail::dependent_false<Schedule>, "Unsupported schedule."); } }(); return cute::transform(epi_tile, seq<0,1>{}, [] (auto epi_tiler, auto I) { auto cta_tiler = make_layout(get<I>(TileShape_MNK{})); // This is a multimodal CTA tiler, transform before returning if constexpr (depth(cta_tiler) > 0) { // This is an implicit multimodal tiler, match profile and return if constexpr (tuple_size_v<decltype(shape(cta_tiler))> == 1) { return make_tile(epi_tiler); } // This is an explicit multimodal tiler, compose out epi tiler else { return composition(cta_tiler, epi_tiler); } } // This is a flat CTA tiler, no need for transformation else { return epi_tiler; } }); } else if constexpr (cute::is_tuple<EpilogueTileType>::value) { EpilogueTileType epi_tile; constexpr int M = size<0>(shape(epi_tile)); constexpr int N = size<1>(shape(epi_tile)); static_assert(!is_layout<EpilogueTileType>::value, "EpilogueTile must be a cute::Tile or cute::Shape"); static_assert(M == 64 && detail::sm90_is_warp_specialized_v<Schedule> || M == 128 && detail::sm90_is_cooperative_v<Schedule>, "Unsupported tile shape"); static_assert(N % 16 == 0, "Unsupported tile shape"); return epi_tile; } else { static_assert(cutlass::detail::dependent_false<EpilogueTileType>, "Invalid type for EpilogueTileType."); } } // Selects the largest vectorized smem store atom available template <class GmemStrideTypeD, class ElementD> constexpr auto sm90_get_smem_store_op_for_accumulator() { using namespace cute; if constexpr (sizeof(ElementD) == 2 && size<0>(GmemStrideTypeD{}) == 1) { return SM90_U16x8_STSM_T{}; } else if constexpr (sizeof(ElementD) == 2 && size<1>(GmemStrideTypeD{}) == 1) { return SM90_U32x4_STSM_N{}; } else { // auto-vectorizing store return AutoVectorizingCopyWithAssumedAlignment{}; } } // Selects the largest vectorized smem load atom available template <class GmemStrideTypeC, class ElementC> constexpr auto sm90_get_smem_load_op_for_source() { using namespace cute; // Reuse the logic from smem store selector using SmemStoreOp = decltype(sm90_get_smem_store_op_for_accumulator<GmemStrideTypeC, ElementC>()); if constexpr (cute::is_same_v<SmemStoreOp, SM90_U16x8_STSM_T>) { return SM75_U16x8_LDSM_T{}; } else if constexpr (cute::is_same_v<SmemStoreOp, SM90_U32x4_STSM_N>) { return SM75_U32x4_LDSM_N{}; } else { // auto-vectorizing load return AutoVectorizingCopyWithAssumedAlignment<128>{}; } } // callbacks builder with TMA aux out template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class FusionOp, class TileShape_MNK, class EpilogueTile_MN, class ElementAccumulator > struct CallbacksBuilder< Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, FusionOp, TileShape_MNK, EpilogueTile_MN, ElementAccumulator, cute::enable_if_t<(FusionOp::IsAuxOutSupported ^ FusionOp::IsAuxInSupported) // only one aux tensor && not cute::is_subbyte_v<typename FusionOp::ElementAux>> > { using GmemStrideTypeAux = gemm::TagToStrideC_t<typename FusionOp::GmemLayoutTagAux>; using SmemLayoutAtomAux = decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom< GmemStrideTypeAux, typename FusionOp::ElementAux, EpilogueTile_MN>()); using CopyOpR2S = decltype(detail::sm90_get_smem_store_op_for_accumulator< GmemStrideTypeAux, typename FusionOp::ElementAux>()); using CopyOpS2R = 
decltype(detail::sm90_get_smem_load_op_for_source< GmemStrideTypeAux, typename FusionOp::ElementAux>()); using SmemCopyOpAux = cute::conditional_t<FusionOp::IsAuxOutSupported, CopyOpR2S, CopyOpS2R>; using Callbacks = fusion::FusionCallbacks< Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, FusionOp, TileShape_MNK, EpilogueTile_MN, SmemLayoutAtomAux, SmemCopyOpAux >; }; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class FusionOp, class TileShape_MNK, class EpilogueTile_MN, class ElementAccumulator > struct CallbacksBuilder< Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, FusionOp, TileShape_MNK, EpilogueTile_MN, ElementAccumulator, cute::enable_if_t<(FusionOp::IsAuxOutSupported ^ FusionOp::IsAuxInSupported) // only one aux tensor && sizeof_bits_v<typename FusionOp::ElementAux> == 1> > { using Callbacks = fusion::FusionCallbacks< Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, FusionOp, TileShape_MNK, EpilogueTile_MN, Layout<_1,_0>, DefaultCopy // aux bit tensor doesn't use smem >; }; // Helper for building TMA warp-specialized collective epilogues, specialized by // the fusion operation performed and the dispatch policy to use. template < class TileShape_MNK, class EpilogueTile_MN, class ElementAccumulator, class ElementCompute, class ElementC_, class GmemLayoutTagC_, int AlignmentC, class ElementD_, class GmemLayoutTagD, int AlignmentD, class FusionOpOrCallbacks, class DispatchPolicy > struct Sm90TmaBuilderImpl { // Passing void D disables destination store + smem allocation using ElementD = cute::conditional_t<cute::is_void_v<ElementD_>, fusion::get_element_aux_t<FusionOpOrCallbacks>, ElementD_>; // Passing void C disables source load + smem allocation using ElementC = cute::conditional_t<cute::is_void_v<ElementC_>,ElementD,ElementC_>; // prevents void ref breakages using GmemLayoutTagC = cute::conditional_t<cute::is_void_v<ElementC_>,GmemLayoutTagD,GmemLayoutTagC_>; using GmemStrideTypeC = cutlass::detail::TagToStrideC_t<GmemLayoutTagC>; using GmemStrideTypeD = cutlass::detail::TagToStrideC_t<GmemLayoutTagD>; using CopyOpS2G = cute::conditional_t<detail::is_im2col_mode<GmemLayoutTagD>, SM90_TMA_STORE_IM2COL, SM90_TMA_STORE >; using CopyOpG2S = cute::conditional_t<detail::is_im2col_mode<GmemLayoutTagC>, SM90_TMA_LOAD_IM2COL, SM90_TMA_LOAD >; // TMA builder allows for passing callbacks directly, which is either a fusion::FusionCallbacks // instance or a direct visitor implementation, e.g. 
fusion::Sm90LinearCombination using FusionCallbacks = typename CallbacksBuilder< DispatchPolicy, FusionOpOrCallbacks, TileShape_MNK, EpilogueTile_MN, ElementAccumulator >::Callbacks; using CollectiveOp = cutlass::epilogue::collective::CollectiveEpilogue< DispatchPolicy, TileShape_MNK, EpilogueTile_MN, ElementC_, // Need to pass void through to expose via GemmUniversal GmemStrideTypeC, ElementD_, GmemStrideTypeD, FusionCallbacks, CopyOpG2S, decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<GmemStrideTypeC, ElementC, EpilogueTile_MN>()), decltype(detail::sm90_get_smem_load_op_for_source<GmemStrideTypeC, ElementC>()), CopyOpS2G, decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<GmemStrideTypeD, ElementD, EpilogueTile_MN>()), decltype(detail::sm90_get_smem_store_op_for_accumulator<GmemStrideTypeD, ElementD>()) >; }; /////////////////////////////////////////////////////////////////////////////// // Descriptor classes for defining EVT nodes // Some of the epilogue visitor nodes require non-intuitive template arguments // such as CopyOpS2R for AuxLoad node. Traditionaly, these are resolved by the // builder classes. Here we provide a set of descriptor classes that resolve // these template arguments from more intuitive types such as Stride, Layout // Get TileShape, EpilogueTile, Dispatch Policy, StagesC, and STagesD template< typename TileShape_MNK, typename EpilogueTileType, typename ElementC, typename ElementD, typename Schedule > struct EpilogueDescriptor { using TileShape = TileShape_MNK; using EpilogueTile = decltype( detail::sm90_compute_tile_shape_or_override< ElementD, EpilogueTileType, Schedule, TileShape_MNK >() ); using DispatchPolicy = decltype( detail::sm90_get_tma_dispatch_policy< TileShape_MNK, EpilogueTile, ElementC, ElementD, Schedule >() ); constexpr static int StagesC = DispatchPolicy::StagesC; constexpr static int StagesD = DispatchPolicy::StagesD; }; // Get Stride, SmemLayout, and CopyOpS2R for AuxLoad node template< typename EpilogueDescriptor, typename StrideOrLayoutTag, typename ElementAux > struct AuxLoadDescriptor { constexpr static int Stages = EpilogueDescriptor::StagesC; using EpilogueTile = typename EpilogueDescriptor::EpilogueTile; using Element = ElementAux; using Stride = cutlass::detail::TagToStrideC_t<StrideOrLayoutTag>; using SmemLayoutAtom = decltype( detail::sm90_get_epilogue_smem_swizzle_layout_atom< Stride, ElementAux, typename EpilogueDescriptor::EpilogueTile >() ); using CopyOpS2R = decltype(detail::sm90_get_smem_load_op_for_source<Stride, ElementAux>()); }; // Get Stride, SmemLayout, and CopyOpS2R for AuxStore node template< typename EpilogueDescriptor, typename StrideOrLayoutTag, typename ElementAux > struct AuxStoreDescriptor { constexpr static int Stages = EpilogueDescriptor::StagesD; using EpilogueTile = typename EpilogueDescriptor::EpilogueTile; using Element = ElementAux; using Stride = cutlass::detail::TagToStrideC_t<StrideOrLayoutTag>; using SmemLayoutAtom = decltype( detail::sm90_get_epilogue_smem_swizzle_layout_atom< Stride, ElementAux, typename EpilogueDescriptor::EpilogueTile >() ); using CopyOpR2S = decltype(detail::sm90_get_smem_store_op_for_accumulator<Stride, ElementAux>()); }; template< typename EpilogueDescriptor, typename ElementVector > struct RowBroadcastDescriptor { constexpr static int Stages = ceil_div( EpilogueDescriptor::StagesC, size(shape_div(take<0, 2>(typename EpilogueDescriptor::TileShape{}), typename EpilogueDescriptor::EpilogueTile{})) ) + 1; using Element = ElementVector; }; } // namespace detail 
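///////////////////////////////////////////////////////////////////////////////

// Usage sketch (illustrative only): the tile shape, element types, and schedule below
// are hypothetical placeholders. It shows how the descriptor classes above resolve the
// non-intuitive template arguments of an EVT node, here fusion::Sm90AuxStore, whose
// parameters (Stages, EpilogueTile, Element, RoundStyle, Stride, SmemLayoutAtom,
// CopyOpR2S) map one-to-one onto members of AuxStoreDescriptor.

namespace example_descriptor_sketch {

using TileShape_MNK = cute::Shape<cute::_128, cute::_128, cute::_64>;

using EpiDescriptor = detail::EpilogueDescriptor<
    TileShape_MNK, EpilogueTileAuto, cutlass::half_t, cutlass::half_t, TmaWarpSpecialized>;

using AuxDescriptor = detail::AuxStoreDescriptor<
    EpiDescriptor, cutlass::layout::RowMajor, cutlass::half_t>;

// The descriptor members supply every template argument of the aux-store visitor node.
using AuxStoreNode = fusion::Sm90AuxStore<
    AuxDescriptor::Stages,
    typename AuxDescriptor::EpilogueTile,
    typename AuxDescriptor::Element,
    cutlass::FloatRoundStyle::round_to_nearest,
    typename AuxDescriptor::Stride,
    typename AuxDescriptor::SmemLayoutAtom,
    typename AuxDescriptor::CopyOpR2S>;

} // namespace example_descriptor_sketch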
/////////////////////////////////////////////////////////////////////////////// // No-smem builder template < class TileShape_MNK, class ClusterShape_MNK, class EpilogueTileType, class ElementAccumulator, class ElementCompute, class ElementC_, class GmemLayoutTagC_, int AlignmentC, class ElementD, class GmemLayoutTagD, int AlignmentD, class Schedule, FloatRoundStyle RoundStyle > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, ElementAccumulator, ElementCompute, ElementC_, GmemLayoutTagC_, AlignmentC, ElementD, GmemLayoutTagD, AlignmentD, Schedule, fusion::LinearCombination<ElementD,ElementCompute,ElementC_,ElementCompute,RoundStyle>, cute::enable_if_t<cute::is_same_v<Schedule, NoSmemWarpSpecialized> || cute::is_same_v<Schedule, PtrArrayNoSmemWarpSpecialized> >> { // Passing void C disables source load using ElementC = cute::conditional_t<cute::is_void_v<ElementC_>, ElementD, ElementC_>; // prevents cute breakages using GmemLayoutTagC = cute::conditional_t<cute::is_void_v<ElementC_>, GmemLayoutTagD, GmemLayoutTagC_>; static constexpr thread::ScaleType::Kind ScaleType = cute::is_void_v<ElementC_> ? thread::ScaleType::OnlyAlphaScaling : thread::ScaleType::Default; static constexpr int FragmentSize = 1; using ThreadOp = thread::LinearCombination< ElementD, FragmentSize, ElementAccumulator, ElementCompute, ScaleType, RoundStyle, ElementC>; using CollectiveOp = cute::conditional_t< cute::is_same_v<Schedule, NoSmemWarpSpecialized>, cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< cutlass::epilogue::collective::DefaultEpilogue< cutlass::detail::TagToStrideC_t<GmemLayoutTagC>, cutlass::detail::TagToStrideC_t<GmemLayoutTagD>, ThreadOp, cutlass::gemm::EpilogueDefault>>, // Epilogue for Ptr-Array and Grouped Gemm cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< cutlass::epilogue::collective::DefaultEpilogueArray< cutlass::detail::TagToStrideC_t<GmemLayoutTagC>, cutlass::detail::TagToStrideC_t<GmemLayoutTagD>, ThreadOp, Schedule>> >; }; // Tma warp-specialized builder template < class TileShape_MNK, class ClusterShape_MNK, class EpilogueTileType, class ElementAccumulator, class ElementCompute, class ElementC, class GmemLayoutTagC, int AlignmentC, class ElementD_, class GmemLayoutTagD, int AlignmentD, class Schedule, class FusionOperation > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, ElementAccumulator, ElementCompute, ElementC, GmemLayoutTagC, AlignmentC, ElementD_, GmemLayoutTagD, AlignmentD, Schedule, FusionOperation, cute::enable_if_t<cute::is_same_v<Schedule, TmaWarpSpecialized> || cute::is_same_v<Schedule, TmaWarpSpecializedCooperative> >> { private: using ElementD = cute::conditional_t<cute::is_void_v<ElementD_>, fusion::get_element_aux_t<FusionOperation>, ElementD_>; using EpilogueTile_MN = decltype(detail::sm90_compute_tile_shape_or_override<ElementD, EpilogueTileType, Schedule, TileShape_MNK>()); using DispatchPolicy = decltype(detail::sm90_get_tma_dispatch_policy<TileShape_MNK,EpilogueTile_MN,ElementC,ElementD,Schedule>()); public: using CollectiveOp = typename detail::Sm90TmaBuilderImpl< TileShape_MNK, EpilogueTile_MN, ElementAccumulator, ElementCompute, ElementC, GmemLayoutTagC, AlignmentC, ElementD_, GmemLayoutTagD, AlignmentD, FusionOperation, DispatchPolicy >::CollectiveOp; }; // Auto builder template < class TileShape_MNK, class ClusterShape_MNK, class EpilogueTileType, class ElementAccumulator, class 
ElementCompute, class ElementC, class GmemLayoutTagC, int AlignmentC, class ElementD, class GmemLayoutTagD, int AlignmentD, class FusionOperation > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, ElementAccumulator, ElementCompute, ElementC, GmemLayoutTagC, AlignmentC, ElementD, GmemLayoutTagD, AlignmentD, EpilogueScheduleAuto, FusionOperation, void> { private: static_assert(cute::is_same_v<FusionOperation, fusion::LinearCombination<ElementD,ElementCompute,ElementC,ElementCompute>>, "Auto schedule doesn't support fusion. Use one of the TmaWarpSpecialized schedules instead."); // Pick No-Smem epilogue as the Auto Epilogue Schedule (Auto schedules do not guarantee best performance) // since TMA epilogues are not compatible with non-TMA non-WS mainloops using EpilogueSchedule = NoSmemWarpSpecialized; using _CollectiveBuilder = CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, ElementAccumulator, ElementCompute, ElementC, GmemLayoutTagC, AlignmentC, ElementD, GmemLayoutTagD, AlignmentD, EpilogueSchedule, FusionOperation >; public: using CollectiveOp = typename _CollectiveBuilder::CollectiveOp; }; // DEPRECATED Tma warp-specialized builder for elementwise fusion template < class TileShape_MNK, class ClusterShape_MNK, class EpilogueTileType, class ElementAccumulator, class ElementCompute, class ElementC, class GmemLayoutTagC, int AlignmentC, class ElementD, class GmemLayoutTagD, int AlignmentD, class Schedule, class UnusedFusionOp > struct [[deprecated("Use TmaWarpSpecialized with fusion::LinCombEltAct instead")]] CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, ElementAccumulator, ElementCompute, ElementC, GmemLayoutTagC, AlignmentC, ElementD, GmemLayoutTagD, AlignmentD, Schedule, UnusedFusionOp, cute::enable_if_t<cute::is_base_of_v<TmaWarpSpecializedElementwiseBase, Schedule> || cute::is_base_of_v<TmaWarpSpecializedCooperativeElementwiseBase, Schedule> >> { private: using FusionOp = fusion::LinCombEltAct<Schedule::template ActivationFunctor, ElementD, ElementCompute, ElementC, ElementCompute, Schedule::Round>; using ImplSchedule = cute::conditional_t<cute::is_base_of_v<TmaWarpSpecializedElementwiseBase, Schedule>, TmaWarpSpecialized, TmaWarpSpecializedCooperative>; public: using CollectiveOp = typename CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, ElementAccumulator, ElementCompute, ElementC, GmemLayoutTagC, AlignmentC, ElementD, GmemLayoutTagD, AlignmentD, ImplSchedule, FusionOp >::CollectiveOp; }; // DEPRECATED Tma warp-specialized builder for bias + elementwise fusion template < class TileShape_MNK, class ClusterShape_MNK, class EpilogueTileType, class ElementAccumulator, class ElementCompute, class ElementC_, class GmemLayoutTagC_, int AlignmentC, class ElementD, class GmemLayoutTagD, int AlignmentD, class Schedule, class UnusedFusionOp > struct [[deprecated("Use TmaWarpSpecialized with fusion::LinCombPerRowBiasEltAct or fusion::LinCombPerRowBiasEltActAux instead")]] CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, ElementAccumulator, ElementCompute, ElementC_, GmemLayoutTagC_, AlignmentC, ElementD, GmemLayoutTagD, AlignmentD, Schedule, UnusedFusionOp, cute::enable_if_t<cute::is_base_of_v<TmaWarpSpecializedBiasElementwiseBase, Schedule> || 
cute::is_base_of_v<TmaWarpSpecializedCooperativeBiasElementwiseBase, Schedule> >> { private: using EpilogueTile_MN = decltype(detail::sm90_compute_tile_shape_or_override< ElementD, EpilogueTileType, Schedule, TileShape_MNK>()); // MSVC doesn't seem to be able to deduce DispatchPolicy correctly if it's // defined as decltype of a detail::sm90_get_tma_dispatch_policy call. // Instead, we paste in the contents of that function. A natural refactoring // would be to create a type alias in the detail namespace. using DispatchPolicy = Sm90TmaWarpSpecialized< /* StagesC = */ size(shape_div(take<0, 2>(TileShape_MNK{}), EpilogueTile_MN{})), /* StagesD = */ 2, /* FragmentSize = */ size(EpilogueTile_MN{}) / (detail::sm90_is_cooperative_v<Schedule> ? 256 : 128), /* ReuseSmemC = */ sizeof_bits_v<ElementC_> == sizeof_bits_v<ElementD>, false >; using GmemStrideTypeAux = gemm::TagToStrideC_t<GmemLayoutTagD>; using SmemLayoutAtomAux = decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom< GmemStrideTypeAux, typename Schedule::ElementT, EpilogueTile_MN>()); using SmemCopyOpAux = decltype(detail::sm90_get_smem_store_op_for_accumulator< GmemStrideTypeAux, typename Schedule::ElementT>()); using FusionOperationAux = fusion::LinCombPerRowBiasEltActAux< GmemLayoutTagD, Schedule::template ActivationFunctor, ElementD, ElementCompute, typename Schedule::ElementT, typename Schedule::ElementBias, ElementC_, ElementCompute >; using FusionCallbacksAux = fusion::FusionCallbacks< DispatchPolicy, FusionOperationAux, TileShape_MNK, EpilogueTile_MN, SmemLayoutAtomAux, SmemCopyOpAux >; using FusionOperationNoAux = fusion::LinCombPerRowBiasEltAct< Schedule::template ActivationFunctor, ElementD, ElementCompute, typename Schedule::ElementBias, ElementC_, ElementCompute >; using FusionCallbacksNoAux = fusion::FusionCallbacks< DispatchPolicy, FusionOperationNoAux, TileShape_MNK, EpilogueTile_MN >; using ElementC = cute::conditional_t<cute::is_void_v<ElementC_>,ElementD,ElementC_>; // prevents void ref breakages using GmemLayoutTagC = cute::conditional_t<cute::is_void_v<ElementC_>,GmemLayoutTagD,GmemLayoutTagC_>; using GmemStrideTypeC = gemm::TagToStrideC_t<GmemLayoutTagC>; using GmemStrideTypeD = gemm::TagToStrideC_t<GmemLayoutTagD>; public: using CollectiveOp = cutlass::epilogue::collective::Sm90EpilogueTmaWarpSpecializedBiasElementwise< DispatchPolicy::StagesC, DispatchPolicy::StagesD, DispatchPolicy::FragmentSize, TileShape_MNK, EpilogueTile_MN, ElementC_, // Need to pass void through to expose via GemmUniversal GmemStrideTypeC, ElementD, GmemStrideTypeD, cute::conditional_t<Schedule::StoreT, FusionCallbacksAux, FusionCallbacksNoAux>, SM90_TMA_LOAD, decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<GmemStrideTypeC, ElementC, EpilogueTile_MN>()), decltype(detail::sm90_get_smem_load_op_for_source<GmemStrideTypeC, ElementC>()), SM90_TMA_STORE, decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<GmemStrideTypeD, ElementD, EpilogueTile_MN>()), decltype(detail::sm90_get_smem_store_op_for_accumulator<GmemStrideTypeD, ElementD>()) >; }; // CollectiveBuilder that transposed epilogue below is used for sm90 gmma RS TT kernels // since swapping NNN kernels input matrix and transposing its output at the same time then // we can get TTN kernel. 
template < class TileShape_MNK, class ClusterShape_MNK, class EpilogueTileType, class ElementAccumulator, class ElementCompute, class ElementC_, class GmemLayoutTagC_, int AlignmentC, class ElementD, class GmemLayoutTagD, int AlignmentD, FloatRoundStyle RoundStyle > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, ElementAccumulator, ElementCompute, ElementC_, GmemLayoutTagC_, AlignmentC, ElementD, GmemLayoutTagD, AlignmentD, cutlass::gemm::EpilogueTransposed, fusion::LinearCombination<ElementD,ElementCompute,ElementC_,ElementCompute,RoundStyle>, void> { // Passing void C disables source load using ElementC = cute::conditional_t<cute::is_void_v<ElementC_>, ElementD, ElementC_>; // prevents cute breakages using GmemLayoutTagC = cute::conditional_t<cute::is_void_v<ElementC_>, GmemLayoutTagD, GmemLayoutTagC_>; static constexpr thread::ScaleType::Kind ScaleType = cute::is_void_v<ElementC_> ? thread::ScaleType::OnlyAlphaScaling : thread::ScaleType::Default; static constexpr int FragmentSize = 1; using ThreadOp = thread::LinearCombination< ElementD, FragmentSize, ElementAccumulator, ElementCompute, ScaleType, RoundStyle, ElementC>; using CollectiveOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< cutlass::epilogue::collective::DefaultEpilogue< cutlass::detail::TagToStrideC_t<GmemLayoutTagC>, cutlass::detail::TagToStrideC_t<GmemLayoutTagD>, ThreadOp, cutlass::gemm::EpilogueTransposed> >; }; /////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::epilogue::collective
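///////////////////////////////////////////////////////////////////////////////

// Usage sketch (illustrative only): the tile/cluster shapes, element types, alignments,
// and fusion operation below are hypothetical placeholders. It shows how the TMA
// warp-specialized builder defined above is typically instantiated; the resulting
// CollectiveOp is the collective epilogue type consumed by a GemmUniversal kernel.

namespace cutlass::epilogue::collective {
namespace example_builder_sketch {

using TileShape_MNK    = cute::Shape<cute::_128, cute::_128, cute::_64>;
using ClusterShape_MNK = cute::Shape<cute::_1, cute::_2, cute::_1>;

using CollectiveEpilogue = typename CollectiveBuilder<
    arch::Sm90, arch::OpClassTensorOp,
    TileShape_MNK, ClusterShape_MNK,
    EpilogueTileAuto,                                  // let the builder pick the epilogue subtile
    float, float,                                      // ElementAccumulator, ElementCompute
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,  // source C: type, layout, alignment
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,  // output D: type, layout, alignment
    TmaWarpSpecializedCooperative,                     // selects the TMA warp-specialized builder
    fusion::LinearCombination<cutlass::half_t, float, cutlass::half_t, float>
  >::CollectiveOp;

} // namespace example_builder_sketch
} // namespace cutlass::epilogue::collective

///////////////////////////////////////////////////////////////////////////////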
// End of file: cutlass/include/cutlass/epilogue/collective/builders/sm90_builder.inl
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Visitor tree store operations for the sm90 TMA warp-specialized (ws) epilogue */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/workspace.h" #include "cute/tensor.hpp" #include "sm90_visitor_tma_warpspecialized.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::fusion { using namespace cute; using namespace detail; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // // Elementwise Store Operations // ///////////////////////////////////////////////////////////////////////////////////////////////// template < int Stages, class EpilogueTile, class Element, FloatRoundStyle RoundStyle, class StrideMNL, class SmemLayoutAtom, class CopyOpR2S, int Alignment = 128 / sizeof_bits_v<Element>, bool EnableNullptr = true // Noop on nullptr params > struct Sm90AuxStore { using ElementAux = Element; static_assert(Alignment * sizeof_bits_v<Element> % 128 == 0, "sub-16B alignment not supported yet"); constexpr static bool is_m_major = epilogue::collective::detail::is_m_major<StrideMNL>(); // Find the max contiguous layout usable by TMA (if EpilogueTile is a non-compact tiler) using SmemShapeTma = decltype(make_shape( max_common_vector(make_layout(get<0>(EpilogueTile{})),make_layout(get<0>(EpilogueTile{}))), max_common_vector(make_layout(get<1>(EpilogueTile{})),make_layout(get<1>(EpilogueTile{}))))); using SmemLayoutTma = decltype(tile_to_shape( SmemLayoutAtom{}, SmemShapeTma{}, cute::conditional_t<is_m_major, Step<_2,_1>, Step<_1,_2>>{} )); using SmemLayout = decltype(tile_to_shape( SmemLayoutTma{}, make_shape(size<0>(shape(EpilogueTile{})), size<1>(shape(EpilogueTile{})), Int<Stages>{}), cute::conditional_t<is_m_major, Step<_2,_1,_3>, Step<_1,_2,_3>>{} )); struct SharedStorage { alignas(cutlass::detail::alignment_for_swizzle(SmemLayout{})) array_aligned<Element, size(SmemLayout{})> smem_aux; }; struct Arguments { Element* ptr_aux = nullptr; StrideMNL dAux = {}; }; struct Params { using TMA_Aux = decltype(make_tma_copy( SM90_TMA_STORE{}, make_tensor(static_cast<Element*>(nullptr), repeat_like(StrideMNL{}, int32_t(0)), StrideMNL{}), SmemLayoutTma{})); TMA_Aux tma_store_aux; bool is_nullptr = false; }; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) auto problem_shape_mnkl = append<4>(problem_shape, 1); auto [M, N, K, L] = problem_shape_mnkl; bool is_nullptr = false; if constexpr (EnableNullptr) { is_nullptr = args.ptr_aux == nullptr; } typename Params::TMA_Aux tma_store_aux; if (not is_nullptr) { Tensor tensor_aux = make_tensor(args.ptr_aux, make_layout(make_shape(M,N,L), args.dAux)); tma_store_aux = make_tma_copy(SM90_TMA_STORE{}, tensor_aux, SmemLayoutTma{}); } return {tma_store_aux, is_nullptr}; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { return cutlass::Status::kSuccess; } CUTLASS_HOST_DEVICE Sm90AuxStore() { } CUTLASS_HOST_DEVICE Sm90AuxStore(Params const& params, 
SharedStorage const& shared_storage) : params_ptr(&params), smem_aux(const_cast<Element*>(shared_storage.smem_aux.data())) { } Params const* params_ptr; Element* smem_aux; CUTLASS_DEVICE bool is_producer_load_needed() const { return false; } CUTLASS_DEVICE bool is_C_load_needed() const { return false; } template <class... Args> CUTLASS_DEVICE auto get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) { return EmptyProducerLoadCallbacks{}; } template < class RTensor, class TiledR2S, class STensorR2S, class STensorS2G, class GTensorS2G > struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks( RTensor&& tC_rAux, TiledR2S tiled_r2s, STensorR2S&& tRS_sAux, STensorS2G&& bSG_sAux, GTensorS2G&& bSG_gAux, Params const* params_ptr) : tiled_r2s(tiled_r2s), tC_rAux(cute::forward<RTensor>(tC_rAux)), tRS_sAux(cute::forward<STensorR2S>(tRS_sAux)), bSG_sAux(cute::forward<STensorS2G>(bSG_sAux)), bSG_gAux(cute::forward<GTensorS2G>(bSG_gAux)), params_ptr(params_ptr) {} TiledR2S tiled_r2s; RTensor tC_rAux; // (CPY,CPY_M,CPY_N) STensorR2S tRS_sAux; // (R2S,R2S_M,R2S_N,PIPE) STensorS2G bSG_sAux; // (S2G,S2G_M,S2G_N,PIPE) GTensorS2G bSG_gAux; // (S2G,S2G_M,S2G_N,EPI_M,EPI_N) Params const* params_ptr; template <typename ElementAccumulator, typename ElementInput, int FragmentSize> CUTLASS_DEVICE auto visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n, Array<ElementInput, FragmentSize> const& frg_input) { using ConvertInput = NumericArrayConverter<Element, ElementInput, FragmentSize, RoundStyle>; ConvertInput convert_input{}; Tensor tC_rAux_frg = recast<Array<Element, FragmentSize>>(coalesce(tC_rAux)); // (EPI_V) tC_rAux_frg(epi_v) = convert_input(frg_input); return frg_input; } CUTLASS_DEVICE void postreduce(int epi_m, int epi_n, int store_iteration, bool issue_smem_store) { if constexpr (EnableNullptr) { if (params_ptr->is_nullptr) { return; } } using RLayoutR2S = decltype(cute::layout(TiledR2S{}.get_slice(0).retile_S(RTensor{}))); Tensor tRS_rAux = make_tensor(tC_rAux.data(), RLayoutR2S{}); // (R2S,R2S_M,R2S_N) if (issue_smem_store) { int store_pipe_index = store_iteration % Stages; copy(tiled_r2s, tRS_rAux, tRS_sAux(_,_,_,store_pipe_index)); } } CUTLASS_DEVICE void tma_store(int epi_m, int epi_n, int store_iteration, bool issue_tma_store) { if constexpr (EnableNullptr) { if (params_ptr->is_nullptr) { return; } } if (issue_tma_store) { // Issue the TMA store int store_pipe_index = store_iteration % Stages; copy(params_ptr->tma_store_aux, bSG_sAux(_,_,_,store_pipe_index), bSG_gAux(_,_,_,epi_m,epi_n)); } } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... 
Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { auto [M, N, K, L] = args.problem_shape_mnkl; auto [m, n, k, l] = args.tile_coord_mnkl; Tensor mAux = params_ptr->tma_store_aux.get_tma_tensor(make_shape(M,N,L)); // (M,N,L) Tensor gAux = local_tile(mAux, take<0,2>(args.tile_shape_mnk), make_coord(m,n,l)); // (CTA_M,CTA_N) Tensor tC_gAux = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) gAux, args.epi_tile, args.tiled_copy, args.thread_idx); Tensor tC_rAux = make_tensor<Element>(take<0,3>(shape(tC_gAux))); // (CPY,CPY_M,CPY_N) Tensor sAux_epi = cute::as_position_independent_swizzle_tensor( make_tensor(make_smem_ptr(smem_aux), SmemLayout{})); // (EPI_TILE_M,EPI_TILE_N,PIPE) Tensor gAux_epi = flat_divide(gAux, args.epi_tile); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N) auto tiled_r2s = conditional_return<ReferenceSrc>( make_tiled_copy_S(Copy_Atom<CopyOpR2S,Element>{}, args.tiled_copy), make_tiled_copy_D(Copy_Atom<CopyOpR2S,Element>{}, args.tiled_copy) ); auto tRS_sAux = tiled_r2s.get_slice(args.thread_idx).partition_D(sAux_epi); // (R2S,R2S_M,R2S_N,PIPE) ThrCopy thrblk_s2g = params_ptr->tma_store_aux.get_slice(_0{}); Tensor bSG_sAux = thrblk_s2g.partition_S(sAux_epi); // (TMA,TMA_M,TMA_N,PIPE) Tensor bSG_gAux = thrblk_s2g.partition_D(gAux_epi); // (TMA,TMA_M,TMA_N,EPI_M,EPI_N) return ConsumerStoreCallbacks<decltype(tC_rAux), decltype(tiled_r2s), decltype(tRS_sAux), decltype(bSG_sAux), decltype(bSG_gAux)>( cute::move(tC_rAux), tiled_r2s, cute::move(tRS_sAux), cute::move(bSG_sAux), cute::move(bSG_gAux), params_ptr); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Reduction Store Operations // ///////////////////////////////////////////////////////////////////////////////////////////////// // Scalar reduction template < template <class> class RegReduceFn, template <class> class GmemReduceFn, class ElementOutput, class ElementCompute, FloatRoundStyle RoundStyle, class StrideMNL = Stride<_0,_0,_0>, bool EnableNullptr = true // Noop on nullptr params > struct Sm90ScalarReduction { private: static_assert( (cute::is_same_v<StrideMNL, Stride<_0,_0, _0>>) || // scalar reduction, e.g. tensor max element (cute::is_same_v<StrideMNL, Stride<_0,_0, _1>>) || // batched scalar reduction, e.g. 
per-batch max element (cute::is_same_v<StrideMNL, Stride<_0,_0,int>>)); static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value; static_assert(IsAtomic, "non-atomic scalar reduction not supported yet"); public: struct SharedStorage { }; struct Arguments { ElementOutput* ptr_scalar = nullptr; ElementCompute reduction_identity = ElementCompute(0); StrideMNL dScalar = {}; }; using Params = Arguments; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { if constexpr (IsAtomic) { auto [M, N, K, L] = problem_shape; Layout mScalar_layout = make_layout(make_shape(M,N,L), args.dScalar); if (args.ptr_scalar != nullptr) { return fill_workspace(args.ptr_scalar, ElementOutput(args.reduction_identity), cosize(mScalar_layout), stream, cuda_adapter); } } return cutlass::Status::kSuccess; } CUTLASS_DEVICE bool is_producer_load_needed() const { return false; } CUTLASS_DEVICE bool is_C_load_needed() const { return false; } CUTLASS_HOST_DEVICE Sm90ScalarReduction() { } CUTLASS_HOST_DEVICE Sm90ScalarReduction(Params const& params, SharedStorage const& shared_storage) : params(params) { } Params const params; template <class... Args> CUTLASS_DEVICE auto get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) { return EmptyProducerLoadCallbacks{}; } template<class CTensor, class ResidueMN> struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks( int l_coord, CTensor tCcScalar, ResidueMN residue_mn, Params const& params) : scalar(params.reduction_identity), l_coord(l_coord), tCcScalar(tCcScalar), residue_mn(residue_mn), params(params) {} ElementCompute scalar; int l_coord; CTensor tCcScalar; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) ResidueMN residue_mn; Params params; template <typename ElementAccumulator, typename ElementInput, int FragmentSize> CUTLASS_DEVICE auto visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n, Array<ElementInput, FragmentSize> const& frg_input) { if constexpr (EnableNullptr) { if (params.ptr_scalar == nullptr) { return frg_input; } } using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>; using ReduceInput = RegReduceFn<ElementCompute>; ConvertInput convert_input{}; ReduceInput reduce_input{}; Array frg_I = convert_input(frg_input); Tensor tCcScalar_mn = tCcScalar(_,_,_,epi_m,epi_n); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < FragmentSize; ++i) { if (elem_less(tCcScalar_mn(epi_v * FragmentSize + i), residue_mn)) { scalar = reduce_input(scalar, frg_I[i]); } } return frg_input; } CUTLASS_DEVICE void end() { if constexpr (EnableNullptr) { if (params.ptr_scalar == nullptr) { return; } } using ConvertI = NumericConverter<ElementOutput, ElementCompute, RoundStyle>; using ReduceInput = GmemReduceFn<ElementOutput>; ConvertI convert_I{}; ReduceInput reduce_input{}; ElementOutput* ptr_scalar = params.ptr_scalar + l_coord * get<2>(params.dScalar); reduce_input(ptr_scalar, convert_I(scalar)); } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the 
tiled copy class... Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { return ConsumerStoreCallbacks<decltype(args.tCcD), decltype(args.residue_mn)>( get<3>(args.tile_coord_mnkl), args.tCcD, args.residue_mn, params); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Row vector reduction template < template <class> class RegReduceFn, template <class> class ShuffleReduceFn, template <class> class GmemReduceFn, int Stages, class CtaTileShapeMNK, class ElementOutput, class ElementCompute, FloatRoundStyle RoundStyle, class StrideMNL = Stride<_0,_1,_0>, int Alignment = 128 / sizeof_bits_v<ElementOutput>, bool EnableNullptr = true, // Noop on nullptr params // If this is false, ptr_row is assumed to point to a compact n-major (ceil_div(M,CTA_M), round_nearest(N,CTA_N), L) // tensor of ElementCompute. It is the user's responsibility to reduce this to a (N, L) tensor of ElementOutput bool FinalReduction = true, // False means skip OOB predication if OOB inputs are known to be the reduction identity bool VisitCheckOOB = true > struct Sm90RowReduction { private: static_assert(Stages == 0, "Smem usage not supported yet"); static_assert(Alignment * sizeof_bits_v<ElementOutput> % 128 == 0, "sub-16B alignment not supported yet"); static_assert( (cute::is_same_v<StrideMNL, Stride<_0,_1, _0>>) || // row vector reduction, e.g. per-col sum over all batches (cute::is_same_v<StrideMNL, Stride<_0,_1,int>>)); // batched row vector reduction, e.g. per-col sum per batch static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value; static_assert(not (IsAtomic && not FinalReduction), "atomic reduction must be final"); public: struct SharedStorage { }; struct Arguments { void* ptr_row = nullptr; // ElementOutput* if FinalReduction, else ElementCompute* ElementCompute reduction_identity = 0; StrideMNL dRow = {}; }; struct Params { void* ptr_row = nullptr; ElementCompute reduction_identity = 0; StrideMNL dRow = {}; ElementCompute* reduction_buffer = nullptr; int* tile_counters = nullptr; }; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { ElementCompute* reduction_buffer; int* tile_counters = nullptr; if constexpr (IsAtomic) { reduction_buffer = nullptr; } else if constexpr (not FinalReduction) { reduction_buffer = reinterpret_cast<ElementCompute*>(args.ptr_row); } else { auto [M, N, K, L] = problem_shape; auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{}; size_t tile_counters_offset = product(ceil_div(make_shape(size<>(M), size<>(N), L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute); tile_counters_offset = round_nearest(tile_counters_offset, sizeof(int)); reduction_buffer = reinterpret_cast<ElementCompute*>(workspace); tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset); } return { args.ptr_row, args.reduction_identity, args.dRow, reduction_buffer, tile_counters }; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { if constexpr (IsAtomic || not FinalReduction) { return 0; } size_t workspace_size = 0; auto [M, N, K, L] = problem_shape; auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{}; // Increment by size of reduction buffer workspace_size += product(ceil_div(make_shape(size<>(M),size<>(N),L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute); 
// Align and increment by size of tile counters workspace_size = round_nearest(workspace_size, sizeof(int)); workspace_size += cute::ceil_div(size<>(N), tile_N) * sizeof(int); return workspace_size; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { if constexpr (IsAtomic) { auto [M, N, K, L] = problem_shape; Layout mRow_layout = make_layout(make_shape(M,N,L), args.dRow); if (args.ptr_row != nullptr) { return fill_workspace(args.ptr_row, ElementOutput(args.reduction_identity), cosize(mRow_layout), stream, cuda_adapter); } return Status::kSuccess; } auto [M, N, K, L] = problem_shape; auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{}; size_t tile_counters_offset = product(ceil_div(make_shape(size<>(M),size<>(N),L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute); int* tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset); size_t tile_counters_size = cute::ceil_div(size<>(N), tile_N) * sizeof(int); return zero_workspace(tile_counters, tile_counters_size, stream); } CUTLASS_DEVICE bool is_producer_load_needed() const { return false; } CUTLASS_DEVICE bool is_C_load_needed() const { return false; } CUTLASS_HOST_DEVICE Sm90RowReduction() { } CUTLASS_HOST_DEVICE Sm90RowReduction(Params const& params, SharedStorage const& shared_storage) : params(params) { } Params params; template <class... Args> CUTLASS_DEVICE auto get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) { return EmptyProducerLoadCallbacks{}; } template<class ArgsTuple> struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks(ArgsTuple&& args_tuple, Params const& params) : args_tuple(cute::forward<ArgsTuple>(args_tuple)), params(params) {} ArgsTuple args_tuple; Params const& params; bool do_final_reduction = false; template <typename ElementAccumulator, typename ElementInput, int FragmentSize> CUTLASS_DEVICE auto visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n, Array<ElementInput, FragmentSize> const& frg_input) { if constexpr (EnableNullptr) { if (params.ptr_row == nullptr) { return frg_input; } } auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout, lane_layout_MN, lane_mn, warp_layout_MN, warp_mn, tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple; Tensor tCrRow_mn = tCrRow(_,_,_,epi_m,epi_n); Tensor tCcRow_mn = tCcRow(_,_,_,epi_m,epi_n); using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>; using ReduceInput = RegReduceFn<ElementCompute>; ConvertInput convert_input{}; ReduceInput reduce_input{}; Array frg_I = convert_input(frg_input); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < FragmentSize; ++i) { if constexpr (VisitCheckOOB) { if (elem_less(tCcRow_mn(epi_v * FragmentSize + i), residue_mn)) { ElementCompute& tCrRow_vmn = tCrRow_mn(epi_v * FragmentSize + i); tCrRow_vmn = reduce_input(tCrRow_vmn, frg_I[i]); } } else { ElementCompute& tCrRow_vmn = tCrRow_mn(epi_v * FragmentSize + i); tCrRow_vmn = reduce_input(tCrRow_vmn, frg_I[i]); } } return frg_input; } template <class STensor, class SyncFn> CUTLASS_DEVICE void reduce(STensor&& smem_buffer, SyncFn const& sync_fn, int epi_m, int epi_n, bool is_last_iteration) { if (not is_last_iteration) { return; } auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout, 
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn, tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple; auto [m, n, k, l] = tile_coord_mnkl; constexpr bool ReferenceSrc = decltype(ref_src)::value; if constexpr (EnableNullptr) { if (params.ptr_row == nullptr) { return; } } // fully OOB CTA in partially OOB cluster if (not elem_less(cRow(_0{},_0{}), residue_mn)) { return; } // // 1. Warp shuffle reduction // using FragmentShuffle = Array<ElementCompute, sizeof(uint64_t) / sizeof(ElementCompute)>; using ReduceShuffle = ShuffleReduceFn<FragmentShuffle>; ReduceShuffle reduce_shuffle{}; Tensor tCrRow_frg = recast<FragmentShuffle>(filter(tCrRow)); CUTLASS_PRAGMA_UNROLL for (int reduction_rows = size<0>(lane_layout_MN) / 2; reduction_rows > 0; reduction_rows /= 2) { CUTLASS_PRAGMA_UNROLL for (int frg_idx = 0; frg_idx < size(tCrRow_frg); ++frg_idx) { uint64_t frg_shfl = reinterpret_cast<uint64_t&>(tCrRow_frg(frg_idx)); frg_shfl = __shfl_down_sync(0xFFFFFFFF, frg_shfl, lane_layout_MN(reduction_rows, _0{})); tCrRow_frg(frg_idx) = reduce_shuffle(tCrRow_frg(frg_idx), reinterpret_cast<FragmentShuffle&>(frg_shfl)); } } bool is_reduced_lane = get<0>(lane_mn) == 0; // // 2. Atomic reduction // if constexpr (IsAtomic) { // Filter so we don't issue redunant copies over stride-0 modes Tensor tCrRow_flt = filter_zeros(tCrRow); Tensor tCcRow_flt = make_tensor(tCcRow.data(), make_layout(tCrRow_flt.shape(), tCcRow.stride())); Tensor tCgRow = sm90_partition_for_epilogue<ReferenceSrc>(gRow_l(_,_,l), epi_tile, tiled_copy, thread_idx); Tensor tCgRow_flt = filter_zeros(tCgRow); // NOTE: atomic reduction is performed in the output type using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>; using ReduceOutput = GmemReduceFn<ElementOutput>; ConvertOutput convert_output{}; ReduceOutput reduce_output{}; if (is_reduced_lane) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(tCrRow_flt); ++i) { if (elem_less(tCcRow_flt(i), residue_mn)) { reduce_output(&tCgRow_flt(i), convert_output(tCrRow_flt(i))); } } } sync_fn(); } // // 2. One warp in M, skip threadblock smem reduction // else if constexpr (decltype(size<0>(warp_layout_MN))::value <= 1) { // Dump warp reduction to gmem workspace using ElementGmem = cute::conditional_t<FinalReduction, ElementCompute volatile, ElementCompute>; Tensor tCgBuf = sm90_partition_for_epilogue<ReferenceSrc>(gBuf_ml(_,_,m,l), epi_tile, tiled_copy, thread_idx); if (is_reduced_lane) { // Filter so we don't issue redundant copies over stride-0 modes // (only works if 0-strides are in same location, which is by construction) copy_aligned(filter(tCrRow), recast<ElementGmem>(filter(tCgBuf))); } sync_fn(); } // // 2. 
Multiple warps in M, do threadblock smem reduction // else { Tensor sBuf = make_tensor(make_smem_ptr<ElementCompute>(raw_pointer_cast(smem_buffer.data())), sBuf_layout); static_assert(decltype(cosize(sBuf.layout()))::value * sizeof(ElementCompute) <= decltype(cosize(smem_buffer.layout()))::value * sizeof(typename remove_cvref_t<STensor>::value_type), "smem reduction buffer not large enough, use a larger epilogue tile"); // Dump warp reduction to smem workspace Tensor tCsBuf = sm90_partition_for_epilogue<ReferenceSrc>(sBuf(_,_,get<0>(warp_mn)), epi_tile, tiled_copy, thread_idx); if (is_reduced_lane) { // Filter so we don't issue redunant copies over stride-0 modes // (only works if 0-strides are in same location, which is by construction) copy_aligned(filter(tCrRow), filter(tCsBuf)); } sync_fn(); constexpr int SmemFragSize = cute::max(size_t{1}, sizeof(uint32_t) / sizeof(ElementCompute)); using FragmentSmem = Array<ElementCompute, SmemFragSize>; using VectorSmem = uint_bit_t<sizeof_bits_v<FragmentSmem>>; using ReduceSmem = GmemReduceFn<FragmentSmem>; ReduceSmem reduce_smem{}; Tensor sBuf_frg = recast<FragmentSmem>(filter_zeros(sBuf)); Tensor sBuf_vec = recast<VectorSmem>(filter_zeros(sBuf)); constexpr int FragsPerRow = decltype(size<1>(sBuf_frg))::value; // Do the threadblock smem reduction CUTLASS_PRAGMA_UNROLL for (int reduction_rows = size<0>(warp_layout_MN) / 2; reduction_rows > 1; reduction_rows /= 2) { int FragsPerReduction = reduction_rows * FragsPerRow; CUTLASS_PRAGMA_NO_UNROLL for (int frg_idx = thread_idx; frg_idx < FragsPerReduction; frg_idx += size(tiled_copy)) { FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerReduction)); sBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem); } sync_fn(); } // Do final smem reduction and dump to gmem workspace using VectorGmem = cute::conditional_t<FinalReduction, VectorSmem volatile, VectorSmem>; Tensor gBuf_vec = recast<VectorGmem>(filter(gBuf_ml(_,_,m,l))); CUTLASS_PRAGMA_NO_UNROLL for (int frg_idx = thread_idx; frg_idx < FragsPerRow; frg_idx += size(tiled_copy)) { FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerRow)); gBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem); } sync_fn(); } // // 3. Increment atomic counters to signal final gmem reduction // if constexpr (not IsAtomic && FinalReduction) { // Ensure gmem writes are visible to other threads before incrementing counter __threadfence(); sync_fn(); // Collective thread 0 increments atomic tile counter and copies value to smem int* prev_tile_count = reinterpret_cast<int*>(raw_pointer_cast(smem_buffer.data())); if (thread_idx == 0) { *prev_tile_count = atomicAdd(&params.tile_counters[n], 1); } sync_fn(); // Broadcast tile count to other threads in CTA and determine final reduction status do_final_reduction = *prev_tile_count == size<2>(gBuf_ml) * size<3>(gBuf_ml) - 1; sync_fn(); } } CUTLASS_DEVICE void end() { // // 4. 
Do final gmem reduction if necessary // if constexpr (not IsAtomic && FinalReduction) { if (not do_final_reduction) { return; } auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout, lane_layout_MN, lane_mn, warp_layout_MN, warp_mn, tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple; using ReduceOutput = GmemReduceFn<ElementCompute>; using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>; ReduceOutput reduce_output{}; ConvertOutput convert_output{}; // Reduction over batches if (size<2>(stride(gRow_l)) == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int n = thread_idx; n < size<1>(gBuf_ml); n += size(tiled_copy)) { Tensor tRgBuf_ml = gBuf_ml(_0{},n,_,_); ElementCompute output = tRgBuf_ml(_0{}); CUTLASS_PRAGMA_NO_UNROLL for (int ml = 1; ml < size(tRgBuf_ml); ++ml) { output = reduce_output(output, tRgBuf_ml(ml)); } if (elem_less(cRow(_0{},n), residue_mn)) { gRow_l(_0{},n,_0{}) = convert_output(output); } } } // No reduction over batches else { CUTLASS_PRAGMA_NO_UNROLL for (int n = thread_idx; n < size<1>(gBuf_ml); n += size(tiled_copy)) { bool do_store = elem_less(cRow(_0{},n), residue_mn); CUTLASS_PRAGMA_NO_UNROLL for (int l = 0; l < size<3>(gBuf_ml); ++l) { Tensor tRgBuf_m = gBuf_ml(_0{},n,_,l); ElementCompute output = tRgBuf_m(_0{}); CUTLASS_PRAGMA_NO_UNROLL for (int m = 1; m < size(tRgBuf_m); ++m) { output = reduce_output(output, tRgBuf_m(m)); } if (do_store) { gRow_l(_0{},n,l) = convert_output(output); } } } } } } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { Layout ref_layout_MN = [&] () { if constexpr (ReferenceSrc) { return get<0>(args.tiled_copy.get_layoutS_MN()); } else { return get<0>(args.tiled_copy.get_layoutD_MN()); } }(); // tile_mn -> tv_idx // Get the MN layout + coord of lanes to determine shuffle reduction iterations using _W = Int<decltype(args.tiled_copy)::TiledNumThr::value / NumThreadsPerWarp>; Layout tv2lane = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_1,_0,_0>>{}; // tv_idx -> lane_idx Layout ref2lane = composition(tv2lane, ref_layout_MN); // tile_mn -> lane_idx Layout lane_layout_MN = make_layout(filter(get<0>(ref2lane)), filter(get<1>(ref2lane))); // lane_mn -> lane_idx Layout inv_lane_layout_MN = right_inverse(lane_layout_MN); // lane_idx -> lane_mn int lane_idx = canonical_lane_idx(); auto lane_mn = idx2crd(inv_lane_layout_MN(lane_idx), shape(lane_layout_MN)); // Get the MN layout + coord of warps to determine smem reduction iterations Layout tv2warp = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_0,_1,_0>>{}; // tv_idx -> warp_idx Layout ref2warp = composition(tv2warp, ref_layout_MN); // tile_mn -> warp_idx Layout warp_layout_MN = make_layout(filter(get<0>(ref2warp)), filter(get<1>(ref2warp))); // warp_mn -> warp_idx Layout inv_warp_layout_MN = right_inverse(warp_layout_MN); // warp_idx -> warp_mn int warp_idx = args.thread_idx / NumThreadsPerWarp; auto warp_mn = idx2crd(inv_warp_layout_MN(warp_idx), shape(warp_layout_MN)); // Partition output gmem and register tensors auto [tile_M, tile_N, tile_K] = args.tile_shape_mnk; auto [M, N, K, L] = args.problem_shape_mnkl; auto [m, n, k, l] = args.tile_coord_mnkl; Tensor mRow = make_tensor(make_gmem_ptr<ElementOutput>(params.ptr_row), make_shape(M,N,L), params.dRow); // (M,N,L) Tensor gRow_l = local_tile(mRow, take<0,2>(args.tile_shape_mnk), make_coord(m,n,_)); // (CTA_M,CTA_N,L) Tensor 
tCgRow = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) gRow_l(_,_,l), args.epi_tile, args.tiled_copy, args.thread_idx); Tensor tCrRow = make_tensor_like<ElementCompute>(tCgRow); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) fill(tCrRow, params.reduction_identity); // Partition gmem+smem reduction buffer tensors Layout gBuf_layout = make_layout(take<0,2>(args.tile_shape_mnk), make_stride(_0{}, _1{})); auto block_shape = ceil_div(make_shape(M,N,L), shape(gBuf_layout)); // (M_CNT, N_CNT, L_CNT) // Let the M_CNT (the num of partial reduction results) become the outer mode Layout block_layout = make_layout(block_shape, make_stride(get<1>(block_shape), _1{}, get<0>(block_shape) * get<1>(block_shape))); Layout mBuf_layout = blocked_product(gBuf_layout, block_layout); Tensor mBuf = make_tensor(make_gmem_ptr(params.reduction_buffer), mBuf_layout); // (ceil_M,ceil_N,L) Tensor gBuf_ml = local_tile(mBuf, take<0,2>(args.tile_shape_mnk), make_coord(_,n,_)); // (CTA_M,CTA_N,REST_M,L) Layout sBuf_layout = blocked_product(gBuf_layout, // (CTA_M,CTA_N,WARPS_M) make_layout(make_shape(_1{},_1{},size<0>(warp_layout_MN)))); auto args_tuple = make_tuple( bool_constant<ReferenceSrc>{}, cute::move(tCrRow), args.tCcD, gRow_l, args.cD, gBuf_ml, sBuf_layout, lane_layout_MN, lane_mn, warp_layout_MN, warp_mn, args.tile_coord_mnkl, args.residue_mn, args.epi_tile, args.tiled_copy, args.thread_idx); return ConsumerStoreCallbacks<decltype(args_tuple)>(cute::move(args_tuple), params); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Col vector reduction template < template <class> class RegReduceFn, template <class> class ShuffleReduceFn, template <class> class GmemReduceFn, int Stages, class CtaTileShapeMNK, class ElementOutput, class ElementCompute, FloatRoundStyle RoundStyle, class StrideMNL = Stride<_1,_0,_0>, int Alignment = 128 / sizeof_bits_v<ElementOutput>, bool EnableNullptr = true, // Noop on nullptr params // If this is false, ptr_col is assumed to point to a compact m-major (round_nearest(M,CTA_M), ceil_div(N,CTA_N), L) // tensor of ElementCompute. It is the user's responsibility to reduce this to a (M, L) tensor of ElementOutput bool FinalReduction = true, // False means skip OOB predication if OOB inputs are known to be the reduction identity bool VisitCheckOOB = true > struct Sm90ColReduction { private: static_assert(Stages == 0, "Smem usage not supported yet"); static_assert(Alignment * sizeof_bits_v<ElementOutput> % 128 == 0, "sub-16B alignment not supported yet"); static_assert( (cute::is_same_v<StrideMNL, Stride<_1,_0, _0>>) || // col vector reduction, e.g. per-row sum over all batches (cute::is_same_v<StrideMNL, Stride<_1,_0,int>>)); // batched col vector reduction, e.g. 
per-row sum per batch static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value; static_assert(not (IsAtomic && not FinalReduction), "atomic reduction must be final"); public: struct SharedStorage { }; struct Arguments { void* ptr_col = nullptr; // ElementOutput* if FinalReduction, else ElementCompute* ElementCompute reduction_identity = 0; StrideMNL dCol = {}; }; struct Params { void* ptr_col = nullptr; ElementCompute reduction_identity = 0; StrideMNL dCol = {}; ElementCompute* reduction_buffer = nullptr; int* tile_counters = nullptr; }; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { ElementCompute* reduction_buffer; int* tile_counters = nullptr; if constexpr (IsAtomic) { reduction_buffer = nullptr; } else if constexpr (not FinalReduction) { reduction_buffer = reinterpret_cast<ElementCompute*>(args.ptr_col); } else { auto [M, N, K, L] = problem_shape; auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{}; size_t tile_counters_offset = product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute); tile_counters_offset = round_nearest(tile_counters_offset, sizeof(int)); reduction_buffer = reinterpret_cast<ElementCompute*>(workspace); tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset); } return { args.ptr_col, args.reduction_identity, args.dCol, reduction_buffer, tile_counters }; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { if constexpr (IsAtomic || not FinalReduction) { return 0; } size_t workspace_size = 0; auto [M, N, K, L] = problem_shape; auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{}; // Increment by size of reduction buffer workspace_size += product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute); // Align and increment by size of tile counters workspace_size = round_nearest(workspace_size, sizeof(int)); workspace_size += cute::ceil_div(M, tile_M) * sizeof(int); return workspace_size; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { if constexpr (IsAtomic) { auto [M, N, K, L] = problem_shape; Layout mCol_layout = make_layout(make_shape(M,N,L), args.dCol); if (args.ptr_col != nullptr) { return fill_workspace(args.ptr_col, ElementOutput(args.reduction_identity), cosize(mCol_layout), stream, cuda_adapter); } return Status::kSuccess; } auto [M, N, K, L] = problem_shape; auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{}; size_t tile_counters_offset = product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute); tile_counters_offset = round_nearest(tile_counters_offset, sizeof(int)); int* tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset); size_t tile_counters_size = cute::ceil_div(M, tile_M) * sizeof(int); return zero_workspace(tile_counters, tile_counters_size, stream); } CUTLASS_DEVICE bool is_producer_load_needed() const { return false; } CUTLASS_DEVICE bool is_C_load_needed() const { return false; } CUTLASS_HOST_DEVICE Sm90ColReduction() { } CUTLASS_HOST_DEVICE Sm90ColReduction(Params const& params, SharedStorage const& shared_storage) : params(params) { } Params params; template <class... 
Args> CUTLASS_DEVICE auto get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) { return EmptyProducerLoadCallbacks{}; } template<class ArgsTuple> struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks(ArgsTuple&& args_tuple, Params const& params) : args_tuple(cute::forward<ArgsTuple>(args_tuple)), params(params) {} ArgsTuple args_tuple; Params const& params; bool do_final_reduction = false; template <typename ElementAccumulator, typename ElementInput, int FragmentSize> CUTLASS_DEVICE auto visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n, Array<ElementInput, FragmentSize> const& frg_input) { if constexpr (EnableNullptr) { if (params.ptr_col == nullptr) { return frg_input; } } auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout, lane_layout_MN, lane_mn, warp_layout_MN, warp_mn, tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple; Tensor tCrCol_mn = tCrCol(_,_,_,epi_m,epi_n); Tensor tCcCol_mn = tCcCol(_,_,_,epi_m,epi_n); using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>; using ReduceInput = RegReduceFn<ElementCompute>; ConvertInput convert_input{}; ReduceInput reduce_input{}; Array frg_I = convert_input(frg_input); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < FragmentSize; ++i) { if constexpr (VisitCheckOOB) { if (elem_less(tCcCol_mn(epi_v * FragmentSize + i), residue_mn)) { ElementCompute& tCrCol_vmn = tCrCol_mn(epi_v * FragmentSize + i); tCrCol_vmn = reduce_input(tCrCol_vmn, frg_I[i]); } } else { if (elem_less(tCcCol_mn(epi_v * FragmentSize + i), residue_mn)) { ElementCompute& tCrCol_vmn = tCrCol_mn(epi_v * FragmentSize + i); tCrCol_vmn = reduce_input(tCrCol_vmn, frg_I[i]); } } } return frg_input; } template <class STensor, class SyncFn> CUTLASS_DEVICE void reduce(STensor&& smem_buffer, SyncFn const& sync_fn, int epi_m, int epi_n, bool is_last_iteration) { if (not is_last_iteration) { return; } auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout, lane_layout_MN, lane_mn, warp_layout_MN, warp_mn, tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple; auto [m, n, k, l] = tile_coord_mnkl; constexpr bool ReferenceSrc = decltype(ref_src)::value; // Runtime nullptr is noop if constexpr (EnableNullptr) { if (params.ptr_col == nullptr) { return; } } // fully OOB CTA in partially OOB cluster if (not elem_less(cCol(_0{},_0{}), residue_mn)) { return; } // // 1. Warp shuffle reduction // using FragmentShuffle = Array<ElementCompute, sizeof(uint64_t) / sizeof(ElementCompute)>; using ReduceShuffle = ShuffleReduceFn<FragmentShuffle>; ReduceShuffle reduce_shuffle{}; Tensor tCrCol_frg = recast<FragmentShuffle>(filter(tCrCol)); CUTLASS_PRAGMA_UNROLL for (int reduction_cols = size<1>(lane_layout_MN) / 2; reduction_cols > 0; reduction_cols /= 2) { CUTLASS_PRAGMA_UNROLL for (int frg_idx = 0; frg_idx < size(tCrCol_frg); ++frg_idx) { uint64_t frg_shfl = reinterpret_cast<uint64_t&>(tCrCol_frg(frg_idx)); frg_shfl = __shfl_down_sync(0xFFFFFFFF, frg_shfl, lane_layout_MN(_0{},reduction_cols)); tCrCol_frg(frg_idx) = reduce_shuffle(tCrCol_frg(frg_idx), reinterpret_cast<FragmentShuffle&>(frg_shfl)); } } bool is_reduced_lane = get<1>(lane_mn) == 0; // // 2. 
Atomic reduction // if constexpr (IsAtomic) { // Filter so we don't issue redunant copies over stride-0 modes Tensor tCrCol_flt = filter_zeros(tCrCol); Tensor tCcCol_flt = make_tensor(tCcCol.data(), make_layout(tCrCol_flt.shape(), tCcCol.stride())); Tensor tCgCol = sm90_partition_for_epilogue<ReferenceSrc>(gCol_l(_,_,l), epi_tile, tiled_copy, thread_idx); Tensor tCgCol_flt = filter_zeros(tCgCol); // NOTE: atomic reduction is performed in the output type using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>; using ReduceOutput = GmemReduceFn<ElementOutput>; ConvertOutput convert_output{}; ReduceOutput reduce_output{}; if (is_reduced_lane) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(tCrCol_flt); ++i) { if (elem_less(tCcCol_flt(i), residue_mn)) { reduce_output(&tCgCol_flt(i), convert_output(tCrCol_flt(i))); } } } sync_fn(); } // // 2. One warp in N, skip threadblock smem reduction // else if constexpr (decltype(size<1>(warp_layout_MN))::value <= 1) { // Dump warp reduction to gmem workspace using ElementGmem = cute::conditional_t<FinalReduction, ElementCompute volatile, ElementCompute>; Tensor tCgBuf = sm90_partition_for_epilogue<ReferenceSrc>(gBuf_nl(_,_,n,l), epi_tile, tiled_copy, thread_idx); if (is_reduced_lane) { // Filter so we don't issue redundant copies over stride-0 modes // (only works if 0-strides are in same location, which is by construction) copy_aligned(filter(tCrCol), recast<ElementGmem>(filter(tCgBuf))); } sync_fn(); } // // 2. Multiple warps in N, do threadblock smem reduction // else { Tensor sBuf = make_tensor(make_smem_ptr<ElementCompute>(raw_pointer_cast(smem_buffer.data())), sBuf_layout); static_assert(decltype(cosize(sBuf.layout()))::value * sizeof(ElementCompute) <= decltype(cosize(smem_buffer.layout()))::value * sizeof(typename remove_cvref_t<STensor>::value_type), "smem reduction buffer not large enough, use a larger epilogue tile"); // Dump warp reduction to smem workspace Tensor tCsBuf = sm90_partition_for_epilogue<ReferenceSrc>(sBuf(_,_,get<1>(warp_mn)), epi_tile, tiled_copy, thread_idx); if (is_reduced_lane) { // Filter so we don't issue redunant copies over stride-0 modes // (only works if 0-strides are in same location, which is by construction) copy_aligned(filter(tCrCol), filter(tCsBuf)); } sync_fn(); constexpr int SmemFragSize = cute::max(size_t{1}, sizeof(uint32_t) / sizeof(ElementCompute)); using FragmentSmem = Array<ElementCompute, SmemFragSize>; using VectorSmem = uint_bit_t<sizeof_bits_v<FragmentSmem>>; using ReduceSmem = GmemReduceFn<FragmentSmem>; ReduceSmem reduce_smem{}; Tensor sBuf_frg = recast<FragmentSmem>(filter_zeros(sBuf)); Tensor sBuf_vec = recast<VectorSmem>(filter_zeros(sBuf)); constexpr int FragsPerCol = decltype(size<0>(sBuf_frg))::value; // Do the threadblock smem reduction CUTLASS_PRAGMA_UNROLL for (int reduction_cols = size<1>(warp_layout_MN) / 2; reduction_cols > 1; reduction_cols /= 2) { int FragsPerReduction = reduction_cols * FragsPerCol; CUTLASS_PRAGMA_NO_UNROLL for (int frg_idx = thread_idx; frg_idx < FragsPerReduction; frg_idx += size(tiled_copy)) { FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerReduction)); sBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem); } sync_fn(); } // Do final smem reduction and dump to gmem workspace using VectorGmem = cute::conditional_t<FinalReduction, VectorSmem volatile, VectorSmem>; Tensor gBuf_vec = recast<VectorGmem>(filter(gBuf_nl(_,_,n,l))); CUTLASS_PRAGMA_NO_UNROLL for (int frg_idx = thread_idx; frg_idx < 
FragsPerCol; frg_idx += size(tiled_copy)) { FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerCol)); gBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem); } sync_fn(); } // // 3. Increment atomic counters to signal final gmem reduction // if constexpr (not IsAtomic && FinalReduction) { // Ensure gmem writes are visible to other threads before incrementing counter __threadfence(); sync_fn(); // Collective thread 0 increments atomic tile counter and copies value to smem int* prev_tile_count = reinterpret_cast<int*>(raw_pointer_cast(smem_buffer.data())); if (thread_idx == 0) { *prev_tile_count = atomicAdd(&params.tile_counters[m], 1); } sync_fn(); // Broadcast tile count to other threads in CTA and determine final reduction status do_final_reduction = *prev_tile_count == size<2>(gBuf_nl) * size<3>(gBuf_nl) - 1; sync_fn(); } } CUTLASS_DEVICE void end() { // // 4. Do final gmem reduction if necessary // if constexpr (not IsAtomic && FinalReduction) { if (not do_final_reduction) { return; } auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout, lane_layout_MN, lane_mn, warp_layout_MN, warp_mn, tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple; using ReduceOutput = GmemReduceFn<ElementCompute>; using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>; ReduceOutput reduce_output{}; ConvertOutput convert_output{}; // Reduction over batches if (size<2>(stride(gCol_l)) == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = thread_idx; m < size<0>(gBuf_nl); m += size(tiled_copy)) { Tensor tRgBuf_nl = gBuf_nl(m,_0{},_,_); ElementCompute output = tRgBuf_nl(_0{}); CUTLASS_PRAGMA_NO_UNROLL for (int nl = 1; nl < size(tRgBuf_nl); ++nl) { output = reduce_output(output, tRgBuf_nl(nl)); } if (elem_less(cCol(m,_0{}), residue_mn)) { gCol_l(m,_0{},_0{}) = convert_output(output); } } } // No reduction over batches else { CUTLASS_PRAGMA_NO_UNROLL for (int m = thread_idx; m < size<0>(gBuf_nl); m += size(tiled_copy)) { bool do_store = elem_less(cCol(m,_0{}), residue_mn); CUTLASS_PRAGMA_NO_UNROLL for (int l = 0; l < size<3>(gBuf_nl); ++l) { Tensor tRgBuf_n = gBuf_nl(m,_0{},_,l); ElementCompute output = tRgBuf_n(_0{}); CUTLASS_PRAGMA_NO_UNROLL for (int n = 1; n < size(tRgBuf_n); ++n) { output = reduce_output(output, tRgBuf_n(n)); } if (do_store) { gCol_l(m,_0{},l) = convert_output(output); } } } } } } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... 
Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { Layout ref_layout_MN = [&] () { if constexpr (ReferenceSrc) { return get<0>(args.tiled_copy.get_layoutS_MN()); } else { return get<0>(args.tiled_copy.get_layoutD_MN()); } }(); // tile_mn -> tv_idx // Get the MN layout + coord of lanes to determine shuffle reduction iterations using _W = Int<decltype(args.tiled_copy)::TiledNumThr::value / NumThreadsPerWarp>; Layout tv2lane = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_1,_0,_0>>{}; // tv_idx -> lane_idx Layout ref2lane = composition(tv2lane, ref_layout_MN); // tile_mn -> lane_idx Layout lane_layout_MN = make_layout(filter(get<0>(ref2lane)), filter(get<1>(ref2lane))); // lane_mn -> lane_idx Layout inv_lane_layout_MN = right_inverse(lane_layout_MN); // lane_idx -> lane_mn int lane_idx = canonical_lane_idx(); auto lane_mn = idx2crd(inv_lane_layout_MN(lane_idx), shape(lane_layout_MN)); // Get the MN layout + coord of warps to determine smem reduction iterations Layout tv2warp = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_0,_1,_0>>{}; // tv_idx -> warp_idx Layout ref2warp = composition(tv2warp, ref_layout_MN); // tile_mn -> warp_idx Layout warp_layout_MN = make_layout(filter(get<0>(ref2warp)), filter(get<1>(ref2warp))); // warp_mn -> warp_idx Layout inv_warp_layout_MN = right_inverse(warp_layout_MN); // warp_idx -> warp_mn int warp_idx = args.thread_idx / NumThreadsPerWarp; auto warp_mn = idx2crd(inv_warp_layout_MN(warp_idx), shape(warp_layout_MN)); // Partition output gmem and register tensors auto [tile_M, tile_N, tile_K] = args.tile_shape_mnk; auto [M, N, K, L] = args.problem_shape_mnkl; auto [m, n, k, l] = args.tile_coord_mnkl; Tensor mCol = make_tensor(make_gmem_ptr<ElementOutput>(params.ptr_col), make_shape(M,N,L), params.dCol); // (M,N,L) Tensor gCol_l = local_tile(mCol, take<0,2>(args.tile_shape_mnk), make_coord(m,n,_)); // (CTA_M,CTA_N,L) Tensor tCgCol = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) gCol_l(_,_,l), args.epi_tile, args.tiled_copy, args.thread_idx); Tensor tCrCol = make_tensor_like<ElementCompute>(tCgCol); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) fill(tCrCol, params.reduction_identity); // Partition gmem+smem reduction buffer tensors Layout gBuf_layout = make_layout(take<0,2>(args.tile_shape_mnk), make_stride(_1{}, _0{})); Layout mBuf_layout = blocked_product(gBuf_layout, make_layout(ceil_div(make_shape(M,N,L), shape(gBuf_layout)))); Tensor mBuf = make_tensor(make_gmem_ptr(params.reduction_buffer), mBuf_layout); // (ceil_M,ceil_N,L) Tensor gBuf_nl = local_tile(mBuf, take<0,2>(args.tile_shape_mnk), make_coord(m,_,_)); // (CTA_M,CTA_N,REST_N,L) Layout sBuf_layout = blocked_product(gBuf_layout,make_layout(make_shape(_1{},_1{},size<1>(warp_layout_MN)))); // (CTA_M,CTA_N,WARPS_N) auto args_tuple = make_tuple( bool_constant<ReferenceSrc>{}, cute::move(tCrCol), args.tCcD, gCol_l, args.cD, gBuf_nl, sBuf_layout, lane_layout_MN, lane_mn, warp_layout_MN, warp_mn, args.tile_coord_mnkl, args.residue_mn, args.epi_tile, args.tiled_copy, args.thread_idx); return ConsumerStoreCallbacks<decltype(args_tuple)>(std::move(args_tuple), params); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Batch matrix reduction template < int Stages, class EpilogueTile, class Element, class StrideMNL, class CopyOpR2S, class SmemLayoutAtom, int Alignment = 128 / sizeof_bits_v<Element>, bool EnableNullptr = true // Noop on nullptr params > struct Sm90MatrixReduction; 
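// --- Editorial sketch (hedged), not part of the upstream header ---
// The non-atomic row/column reductions above stage partial sums in a gmem workspace
// followed by int tile counters. The helper below is a host-side restatement of the
// row-reduction workspace sizing, added purely for illustration; the element type
// (float) and the function name are assumptions, not CUTLASS APIs.
inline size_t example_row_reduction_workspace_bytes(
    int M, int N, int L, int tile_M, int tile_N) {
  // Partial-sum buffer: one CTA_N-wide row of ElementCompute per (M-tile, N-tile, batch)
  size_t bytes = size_t((M + tile_M - 1) / tile_M)
               * size_t((N + tile_N - 1) / tile_N)
               * size_t(L)
               * size_t(tile_N) * sizeof(float);
  // Round up so the tile counters that follow are int-aligned
  bytes = (bytes + sizeof(int) - 1) / sizeof(int) * sizeof(int);
  // One counter per N-tile signals when the final cross-CTA reduction may run
  bytes += size_t((N + tile_N - 1) / tile_N) * sizeof(int);
  return bytes;
}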
///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::epilogue::fusion /////////////////////////////////////////////////////////////////////////////////////////////////
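// --- Editorial sketch (hedged), appended for illustration only ---
// A hypothetical instantiation of the row-vector reduction above, e.g. as an EVT node
// that accumulates per-column partial sums. The tile shape, element types, and the
// choice of cutlass::plus for all three reduction functors are assumptions made for
// this sketch, not a configuration prescribed by the header.
using ExampleRowReduction = cutlass::epilogue::fusion::Sm90RowReduction<
    cutlass::plus,                                   // RegReduceFn: in-register accumulation
    cutlass::plus,                                   // ShuffleReduceFn: warp-shuffle combine
    cutlass::plus,                                   // GmemReduceFn: workspace + tile-counter final pass
    0,                                               // Stages (smem staging not supported yet)
    cute::Shape<cute::_128, cute::_128, cute::_64>,  // CtaTileShapeMNK
    float,                                           // ElementOutput
    float,                                           // ElementCompute
    cutlass::FloatRoundStyle::round_to_nearest>;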
cutlass/include/cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp/0
{ "file_path": "cutlass/include/cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp", "repo_id": "cutlass", "token_count": 25760 }
31
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// struct LinearCombinationParams { uint64_t alpha_data[2]; uint64_t beta_data[2]; CUTLASS_HOST_DEVICE LinearCombinationParams() : alpha_data {0lu, 0lu}, beta_data {0lu, 0lu} { } template <typename ElementCompute> CUTLASS_HOST_DEVICE LinearCombinationParams(ElementCompute alpha, ElementCompute beta) : alpha_data {0lu, 0lu}, beta_data {0lu, 0lu} { #if defined(__CUDA_ARCH__) reinterpret_cast<ElementCompute&>(alpha_data) = alpha; reinterpret_cast<ElementCompute&>(beta_data) = beta; #else memcpy( alpha_data, &alpha, sizeof(ElementCompute) ); memcpy( beta_data, &beta, sizeof(ElementCompute) ); #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
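// --- Editorial sketch (hedged), appended for illustration only ---
// LinearCombinationParams type-erases the epilogue's alpha/beta scalars into two
// 64-bit words so they can be passed around without knowing ElementCompute. A
// host-side caller might capture float scalars as follows; the function name and
// the values are illustrative assumptions.
inline cutlass::epilogue::thread::LinearCombinationParams
make_example_linear_combination_params() {
  float alpha = 1.0f;   // scales the accumulator
  float beta  = 0.5f;   // scales the source (C) operand
  return cutlass::epilogue::thread::LinearCombinationParams(alpha, beta);
}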
cutlass/include/cutlass/epilogue/thread/linear_combination_params.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_params.h", "repo_id": "cutlass", "token_count": 799 }
32
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_relu0.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_hardswish.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h" #include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/threadblock/interleaved_epilogue.h" #include "cutlass/layout/permute.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// namespace detail { template < typename ElementOutput, typename ElementAccumulator, int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, ElementAccumulator, layout::RowMajor >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, ElementAccumulator >; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for float <= float x 4 template < typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp<float, float, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, float, layout::RowMajor >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, float >; static int const kFragmentsPerIteration = 2; }; /// Partial specialization for int32_t <= int32_t template < int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp<int32_t, int32_t, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, int32_t, layout::RowMajor >; using SharedLoadIterator = 
cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, int32_t >; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for float <= int32_t template < int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp<float, int32_t, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, int32_t, layout::RowMajor >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, int32_t >; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for half <= float x 8 epilogues avoids shared memory bank conflicts. template < typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< half_t, float, 8, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, float, 32, 16, 8, 8 >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, float, 32, 16, 8, 8 >; static int const kFragmentsPerIteration = 2; }; /// Partial specialization for half <= int32_t x 8 epilogues avoids shared memory bank conflicts. template < typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< half_t, int32_t, 8, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, int32_t, 32, 16, 8, 8 >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, int32_t, 32, 16, 8, 8 >; static int const kFragmentsPerIteration = 2; }; /// Partial specialization for int8/int4b_t <= int32 x 16/8 epilogues avoids shared memory bank conflicts. /// Threadblock::kN = 256 still has bank conflicts. 
template < typename ElementOutput, int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< ElementOutput, int32_t, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { static_assert(platform::is_same<ElementOutput, cutlass::int4b_t>::value || platform::is_same<ElementOutput, cutlass::uint4b_t>::value || platform::is_same<ElementOutput, int8_t>::value || platform::is_same<ElementOutput, uint8_t>::value, "ElementOutput needs to be 4 or 8 bit (unsigned) int."); static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4), "ElementsPerAccess needs to be 16 or 8."); using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, int32_t, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, int32_t, layout::RowMajor >; using WarpTileIterator = typename platform::conditional< (ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4), WarpTileIteratorNotMixed, WarpTileIteratorMixed>::type; using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, int32_t, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, int32_t >; using SharedLoadIterator = typename platform::conditional< (ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4), SharedLoadIteratorNotMixed, SharedLoadIteratorMixed>::type; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for float_e4m3_t <= float x 16/8 epilogues avoids shared memory bank conflicts. /// Threadblock::kN = 256 still has bank conflicts. 
template < int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< cutlass::float_e4m3_t, float, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using ElementOutput = cutlass::float_e4m3_t; static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4), "ElementsPerAccess needs to be 16 or 8."); using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, float, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, float, layout::RowMajor >; using WarpTileIterator = typename platform::conditional< (ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4), WarpTileIteratorNotMixed, WarpTileIteratorMixed>::type; using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, float, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, float >; using SharedLoadIterator = typename platform::conditional< (ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4), SharedLoadIteratorNotMixed, SharedLoadIteratorMixed>::type; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for float_e5m2_t <= float x 16/8 epilogues avoids shared memory bank conflicts. /// Threadblock::kN = 256 still has bank conflicts. template < int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< cutlass::float_e5m2_t, float, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using ElementOutput = cutlass::float_e5m2_t; static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4), "ElementsPerAccess needs to be 16 or 8."); using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, float, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, float, layout::RowMajor >; using WarpTileIterator = typename platform::conditional< (ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4), WarpTileIteratorNotMixed, WarpTileIteratorMixed>::type; using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, float, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, float >; using SharedLoadIterator = typename platform::conditional< (ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4), SharedLoadIteratorNotMixed, SharedLoadIteratorMixed>::type; static int const kFragmentsPerIteration = 1; }; } // namespace detail //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute, conv::StrideSupport StrideSupport = conv::StrideSupport::kUnity, int Rank = 4 > struct DefaultEpilogueTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; static conv::StrideSupport const kStrideSupport = StrideSupport; static int const kRank = Rank; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; static bool const UseCUDAStore = platform::is_same<ElementOutput, double>::value; using PackedOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout, UseCUDAStore >; using StridedOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorConv< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout, UseCUDAStore, kRank >; using OutputTileIterator = typename platform::conditional<StrideSupport == cutlass::conv::StrideSupport::kUnity, PackedOutputTileIterator, StridedOutputTileIterator>::type; using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value, cutlass::epilogue::warp::FragmentIteratorComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC>, cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC> >::type; /// Support several implementations depending on structure of epilogue using DefaultIterators = detail::DefaultIteratorsTensorOp< ElementOutput, ElementAccumulator, kElementsPerAccess, Shape, typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename OutputTileThreadMap::CompactedThreadMap >; using WarpTileIterator = typename DefaultIterators::WarpTileIterator; using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator; /// Hard-coded padding elements added using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>; static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1); // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding, kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueTensorOpStridedDgrad { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad< OutputTileThreadMap, ElementOutput >; using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value, cutlass::epilogue::warp::FragmentIteratorComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC>, cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC> >::type; /// Support several implementations depending on structure of epilogue using DefaultIterators = detail::DefaultIteratorsTensorOp< ElementOutput, ElementAccumulator, kElementsPerAccess, Shape, typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename OutputTileThreadMap::CompactedThreadMap >; using WarpTileIterator = typename DefaultIterators::WarpTileIterator; using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator; /// Hard-coded padding elements added using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>; static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1); // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding, kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < int Rank, typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueTensorOpAffineRankN { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN< OutputTileThreadMap, ElementOutput, Rank >; // Map to the row major iterator since the iterator selection for affineN is the same. using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value, cutlass::epilogue::warp::FragmentIteratorComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, layout::RowMajor>, cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, layout::RowMajor> >::type; /// Support several implementations depending on structure of epilogue using DefaultIterators = detail::DefaultIteratorsTensorOp< ElementOutput, ElementAccumulator, kElementsPerAccess, Shape, typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename OutputTileThreadMap::CompactedThreadMap >; using WarpTileIterator = typename DefaultIterators::WarpTileIterator; using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator; /// Hard-coded padding elements added using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>; static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1); // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding, kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps which uses /// intereleaved output layout. For this case, shared memory is not needed. 
template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, int InterleavedK, bool isSplitK = false> struct DefaultInterleavedEpilogueTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock:: DefaultInterleavedThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, InterleavedK>::Type; using OutputTileIterator = cutlass::epilogue::threadblock::InterleavedPredicatedTileIterator< OutputTileThreadMap, ElementOutput, InterleavedK>; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC>; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, OutputOp, InterleavedK>; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps which uses /// intereleaved output layout. For this case, shared memory is not needed. template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, int InterleavedK, bool isSplitK = false> struct DefaultInterleavedConvEpilogue { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock:: DefaultInterleavedConvThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, InterleavedK>::Type; using OutputTileIterator = cutlass::epilogue::threadblock::InterleavedConvPredicatedTileIterator< OutputTileThreadMap, ElementOutput, InterleavedK>; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, // can reuse the gemm version here to do element selection layout::ColumnMajorInterleaved<InterleavedK>>; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, OutputOp, InterleavedK>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
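// --- Editorial sketch (hedged), appended for illustration only ---
// Assembling a threadblock epilogue from the defaults above requires a warp-level
// Tensor Op MMA type; it is left as a template parameter here because constructing
// one involves the full gemm::warp machinery. The tile shape, output element type,
// and linear-combination output op below are assumptions chosen for illustration.
template <typename WarpMmaTensorOp>
using ExampleDefaultEpilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
    cutlass::gemm::GemmShape<128, 128, 32>,          // threadblock tile shape
    WarpMmaTensorOp,                                 // warp-level MMA (assumed, supplied by caller)
    1,                                               // PartitionsK
    cutlass::epilogue::thread::LinearCombination<
        cutlass::half_t, 8, float, float>,           // D = alpha * acc + beta * C
    8                                                // elements per vectorized access
>::Epilogue;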
cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h", "repo_id": "cutlass", "token_count": 10917 }
33
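The default-epilogue traits above all follow the same pattern: given a threadblock tile shape, the warp-level tensor-op MMA, the number of K partitions, a thread-level output functor, and the access width, they compose the thread map, tile iterators, and fragment iterators into a single nested ::Epilogue typedef that the GEMM/conv kernel instantiates. The sketch below illustrates that composition for the interleaved int8 case. It is a structural sketch rather than a compilable translation unit: MyWarpMmaTensorOp stands in for a warp-level MMA type that would normally come from the mainloop defaults, and the tile shape, interleave factor, and access width are illustrative assumptions only.

// Sketch: consuming DefaultInterleavedEpilogueTensorOp (hypothetical parameters).
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"

// Placeholder: a warp-level tensor-op MMA supplied by the mainloop defaults.
struct MyWarpMmaTensorOp;  // hypothetical, not defined here

using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;  // assumed CTA tile
static int const kPartitionsK       = 1;   // no split-K slicing inside the CTA
static int const kElementsPerAccess = 16;  // 128b store / 8-bit output element
static int const kInterleavedK      = 32;  // matches ColumnMajorInterleaved<32> output

// Thread-level functor applied to each accumulator fragment: D = alpha * Acc + beta * C.
using OutputOp = cutlass::epilogue::thread::LinearCombination<
    int8_t, kElementsPerAccess, int32_t, float>;

using DefaultEpilogue = cutlass::epilogue::threadblock::DefaultInterleavedEpilogueTensorOp<
    ThreadblockShape, MyWarpMmaTensorOp, kPartitionsK, OutputOp,
    kElementsPerAccess, kInterleavedK>;

// The nested typedef is what a kernel-level template actually instantiates.
using Epilogue = DefaultEpilogue::Epilogue;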
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs and convolution using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_ ///< Output operator > class EpilogueDirectStore { public: using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; using WarpShape = typename WarpMmaOperator_::Shape; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using OutputOp = OutputOp_; using Padding = MatrixShape<0, 0>; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, kPartitionsK >; /// Use this to control the granularity of one epilogue 'iteration' static int const kFragmentsPerIteration = 1; static int constexpr kSmemTiles = 1; static int constexpr kSmemPointerOffset = 0; /// Shared storage allocation needed by the epilogue struct SharedStorage { } ; private: // Assume accumulator tile is multipile interleaved 32x32 tile. 
static int const kElementsPerPartial = 4; using EleShapePerPatial = typename platform::conditional< platform::is_same<ElementAccumulator, float>::value, MatrixShape<2, 2>, MatrixShape<1, 4> >::type; static int const kElementsPerMma = 8; static int const kAccumulatorPatials = 2; using QuadShapePerPatialMma = MatrixShape<4, 4>; static_assert(OutputOp::kCount >= 2, "The direct store epilogue for Tensor Ops requires the output functor have kCount >= 2."); private: LongIndex warp_offset; int thread_idx; int warp_idx; int lane_idx; int warp_m, warp_n; // warp coordinates within a cta int tid_m, tid_n; // thread coordinates within a warp public: /// Constructor CUTLASS_DEVICE EpilogueDirectStore( SharedStorage &shared_storage, ///< Shared storage object int thread_idx_, ///< ID of a thread within the threadblock int warp_idx_, ///< ID of warp within threadblock int lane_idx_ ///< Id of thread within warp ): thread_idx(thread_idx_), warp_idx(warp_idx_), lane_idx(lane_idx_) { // warp offsetting calculations warp_offset = warp_idx * WarpShape::kM * WarpShape::kN; int warp_id_mn = warp_idx % (WarpCount::kM * WarpShape::kN); warp_m = warp_id_mn % WarpCount::kM; warp_n = warp_id_mn / WarpCount::kM; MatrixCoord warp_offset_coord(warp_m*WarpShape::kM, warp_n*WarpShape::kN); // thread offsetting calculations int quad = (lane_idx >> 2); int lane_in_quad = (lane_idx & 3); // this seems to be te correct layout tid_m = quad; tid_n = 2 * lane_in_quad; } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) if (!output_op.is_source_needed()) { compute_source_not_needed_(output_op, destination_iterator, accumulators); } else { compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator); } } private: /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) const int kAccumBlockN = 2; const int kThreadsM = 8; const int kThreadsN = 4; const int kBlockM = WarpShape::kM / kThreadsM; /// Array type used to output using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>; /// Array type passed to the output operator - unused elements are optimized away using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>; /// Array type used by output functor using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>; /// Array type used by output functor using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>; AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators); CUTLASS_PRAGMA_UNROLL for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) { int accum_m = kThreadsM * accum_m_idx; int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m; int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n; ElementOutput *output_ptr = 
destination_iterator.pointer + mL * destination_iterator.stride; ElementOutput *source_ptr = source_iterator.pointer + mL * source_iterator.stride; int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN; CUTLASS_PRAGMA_UNROLL for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) { int accum_idx = accum_m_idx + kBlockM * accum_n_idx; int accum_n = kThreadsM * accum_n_idx; // mL and nL are logical coordinate in 2D mapping of epilogue's 4D output int nL = nL_base + accum_n; bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column()); AccumulatorFragmentType accum_fragment; reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx]; OutputFragmentType output_fragment; if(guard) { reinterpret_cast<OutputAccessType &>(output_fragment) = *reinterpret_cast<OutputAccessType const *>(source_ptr + nL); } // Perform output operator output_fragment = output_op(accum_fragment, output_fragment); if(guard) { // Store *reinterpret_cast<OutputAccessType *>(output_ptr + nL) = reinterpret_cast<OutputAccessType const &>(output_fragment); } } } } /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) const int kAccumBlockN = 2; const int kThreadsM = 8; const int kThreadsN = 4; const int kBlockM = WarpShape::kM / kThreadsM; /// Array type used to output using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>; /// Array type passed to the output operator - unused elements are optimized away using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>; /// Array type used by output functor using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>; /// Array type used by output functor using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>; AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators); CUTLASS_PRAGMA_UNROLL for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) { int accum_m = kThreadsM * accum_m_idx; int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m; int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n; ElementOutput *output_ptr = destination_iterator.pointer + mL * destination_iterator.stride; int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN; CUTLASS_PRAGMA_UNROLL for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) { int accum_idx = accum_m_idx + kBlockM * accum_n_idx; int accum_n = kThreadsM * accum_n_idx; // mL and nL are logical coordinate in 2D mapping of epilogue's 4D output int nL = nL_base + accum_n; bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column()); AccumulatorFragmentType accum_fragment; reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx]; OutputFragmentType output_fragment; // Perform output operator output_fragment = output_op(accum_fragment); if(guard) { // Store *reinterpret_cast<OutputAccessType *>(output_ptr + nL) = reinterpret_cast<OutputAccessType const &>(output_fragment); } } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // 
namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/epilogue_direct_store.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_direct_store.h", "repo_id": "cutlass", "token_count": 4855 }
34
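The coordinate bookkeeping in EpilogueDirectStore is easier to see in isolation. The standalone host program below is a minimal sketch under assumed shapes (a 64x64 warp tile, a 2x2 warp grid, kPartitionsK == 1, and a threadblock offset of zero): it reproduces the lane -> (tid_m, tid_n) mapping from the constructor and the (mL, nL) addressing used by compute_source_needed_ / compute_source_not_needed_. The loop constants mirror the kernel's kThreadsM = 8, kThreadsN = 4, and kAccumBlockN = 2.

// Host-side sketch of the EpilogueDirectStore coordinate mapping.
// Assumptions (for illustration only): WarpShape = 64x64, WarpCount = 2x2x1,
// kPartitionsK == 1, and the threadblock tile starts at row/column 0.
#include <cstdio>

int main() {
  int const kWarpShapeM = 64, kWarpShapeN = 64;   // assumed warp tile
  int const kWarpCountM = 2;                      // assumed warps along M
  int const kThreadsM = 8, kThreadsN = 4, kAccumBlockN = 2;  // same as the kernel

  int warp_idx = 3, lane_idx = 13;                // example thread

  // Warp coordinates within the CTA (k-partition fold omitted since kPartitionsK == 1).
  int warp_m = warp_idx % kWarpCountM;
  int warp_n = warp_idx / kWarpCountM;

  // Same quad / lane-in-quad decomposition as the constructor.
  int quad = lane_idx >> 2;
  int lane_in_quad = lane_idx & 3;
  int tid_m = quad;
  int tid_n = 2 * lane_in_quad;

  // One (accum_m_idx, accum_n_idx) step of the store loops: logical output coordinates.
  int accum_m_idx = 1, accum_n_idx = 2;
  int mL = /* threadblock_offset.row() */ 0 + kWarpShapeM * warp_m + tid_m + kThreadsM * accum_m_idx;
  int nL = /* threadblock_offset.col() */ 0 + kWarpShapeN * warp_n + tid_n + kThreadsM * accum_n_idx;

  int iterations_n = kWarpShapeN / kThreadsN / kAccumBlockN;   // kIterationsN in the kernel
  std::printf("thread(warp %d, lane %d) -> tid(%d,%d), writes (%d,%d), %d N-iterations\n",
              warp_idx, lane_idx, tid_m, tid_n, mL, nL, iterations_n);
  return 0;
}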
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines classes for using 8-bit floating-point types (E4M3 and E5M2) in host or device code. */ #pragma once // FP8 types are available starting CUDA 11.8+ #if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) #define CUDA_FP8_ENABLED 1 #endif #if defined(__CUDA_ARCH__) # if (__CUDA_ARCH__ >= 900) # if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) # define CUDA_PTX_FP8_CVT_ENABLED 1 # endif // (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) # elif (__CUDA_ARCH__ == 890) # if (__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 1)) # define CUDA_PTX_FP8_CVT_ENABLED 1 # endif // (__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 1)) # endif // (__CUDA_ARCH__ >= 900) #endif // defined(__CUDA_ARCH__) #ifdef __GNUC__ // Ignore checks on reinterpret-casts that are being used for bitcasts. #pragma GCC diagnostic ignored "-Wstrict-aliasing" #endif /////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(__CUDACC_RTC__) #include "cutlass/floating_point_nvrtc.h" #else // // Standard Library headers belong here to avoid conflicts with NVRTC.
// #include <cmath> #include <limits> #include <cstdint> #include <cstring> #endif #ifdef CUDA_FP8_ENABLED #include <cuda_fp8.h> #endif #include <cuda_fp16.h> #include "cutlass/cutlass.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// // // FP8 Has 2 encodings possible : E4M3 and E5M2 // // E4M3 : 7 | 6 5 4 3 | 2 1 0 // E5M2 : 7 | 6 5 4 3 2 | 1 0 // /////////////////////////////////////////////////////////////////////////////////////////////////// enum class FloatEncoding { E4M3, E5M2 }; template<FloatEncoding T> struct alignas(1) float8_base { static constexpr bool IS_E4M3 = (T == FloatEncoding::E4M3); static constexpr bool IS_E5M2 = (T == FloatEncoding::E5M2); // Number of Bits representing mantissa and exponents static constexpr int FP32_NUM_BITS = 32; static constexpr int FP32_NUM_EXPONENT_BITS = 8; static constexpr int FP32_NUM_MANTISSA_BITS = 23; static constexpr uint32_t FP32_NAN = 0x7fffffff; static constexpr uint32_t FP32_INFINITY_MASK = 0x7f800000; static constexpr int FP32_MAX_EXPONENT = 127; static constexpr int FP32_MIN_EXPONENT = -126; static constexpr int FP32_EXPONENT_BIAS = 127; static constexpr int FP16_NUM_BITS = 16; static constexpr int FP16_NUM_EXPONENT_BITS = 5; static constexpr int FP16_NUM_MANTISSA_BITS = 10; static constexpr uint16_t FP16_NAN = 0x7fff; static constexpr uint16_t FP16_INFINITY_MASK = 0x7c00; static constexpr int FP16_MAX_EXPONENT = 15; static constexpr int FP16_MIN_EXPONENT = -14; static constexpr int FP16_EXPONENT_BIAS = 15; static constexpr int FP8_NUM_BITS = 8; static constexpr int FP8_NUM_EXPONENT_BITS = IS_E4M3 ? 4 : 5; static constexpr int FP8_NUM_MANTISSA_BITS = IS_E4M3 ? 3 : 2; static constexpr uint8_t FP8_NAN = 0x7f; // Also F8_INF static constexpr uint8_t FP8_INFINITY_MASK = IS_E4M3 ? 0x78 : 0x7c; static constexpr int FP8_MAX_EXPONENT = IS_E4M3 ? 7 : 15; static constexpr int FP8_MIN_EXPONENT = IS_E4M3 ? -6 : -14; static constexpr int FP8_EXPONENT_BIAS = IS_E4M3 ? 7 : 15; static constexpr uint8_t FP8_EXPONENT_MASK = (1 << FP8_NUM_EXPONENT_BITS) - 1; static constexpr uint8_t FP8_MANTISSA_MASK = (1 << FP8_NUM_MANTISSA_BITS) - 1; static constexpr uint8_t FP8_MAX_FLT = (IS_E4M3 ? 0x7e : 0x7b); // 256 in float static constexpr uint32_t FP8_SAT_VAL_FP32 = 0x43800000; // // Data members // /// Data container uint8_t storage; /// Ctors. 
CUTLASS_HOST_DEVICE float8_base() : storage(0) { } /// Is finite implementation CUTLASS_HOST_DEVICE static bool isfinite(float flt) { uint32_t s; #if defined(__CUDA_ARCH__) s = reinterpret_cast<uint32_t const &>(flt); #else std::memcpy(&s, &flt, sizeof(s)); #endif return (s & 0x7f800000) < 0x7f800000; } /// Is NaN implementation CUTLASS_HOST_DEVICE static bool isnan(float flt) { uint32_t s; #if defined(__CUDA_ARCH__) s = reinterpret_cast<uint32_t const &>(flt); #else std::memcpy(&s, &flt, sizeof(s)); #endif return (s & 0x7fffffff) > 0x7f800000; } /// Is infinite implementation CUTLASS_HOST_DEVICE static bool isinf(float flt) { uint32_t s; #if defined(__CUDA_ARCH__) s = reinterpret_cast<uint32_t const &>(flt); #else std::memcpy(&s, &flt, sizeof(s)); #endif // Sign = 0 for +inf, 1 for -inf // Exponent = all ones // Mantissa = all zeros return (s == 0x7f800000) || (s == 0xff800000); } /// FP32 -> FP8 conversion - rounds to nearest even CUTLASS_HOST_DEVICE static uint8_t convert_float_to_fp8(float const& flt) { // software implementation rounds toward nearest even uint32_t s; #if defined(__CUDA_ARCH__) s = reinterpret_cast<uint32_t const &>(flt); #else std::memcpy(&s, &flt, sizeof(s)); #endif // Extract the bits in the FP32 type uint8_t sign = uint8_t((s >> 24 & 0x80)); int32_t exp = int32_t((s >> FP32_NUM_MANTISSA_BITS) & 0xff) - FP32_EXPONENT_BIAS; int mantissa = s & 0x7fffff; uint8_t u = 0; uint8_t const kF8_NaN = 0x7f; // NaN => NaN if (isnan(flt)) { return kF8_NaN; } // Inf => MAX_FLT (satfinite) if (isinf(flt)) { return sign | FP8_MAX_FLT; } // Special handling if (exp == -128) { // int8 range is from -128 to 127 // So 255(inf) - 127(bias) = 128 - will show up as -128 // satfinite return (sign | FP8_MAX_FLT); } int sticky_bit = 0; bool skip_sign = false; bool may_be_nan = false; if ( (exp >= FP8_MIN_EXPONENT) && (exp <= FP8_MAX_EXPONENT) ) { // normal fp32 to normal fp8 exp = exp + FP8_EXPONENT_BIAS; u = uint8_t((uint32_t(exp) & FP8_EXPONENT_MASK) << FP8_NUM_MANTISSA_BITS); u = uint8_t(u | (mantissa >> (FP32_NUM_MANTISSA_BITS - FP8_NUM_MANTISSA_BITS))); } else if(exp < FP8_MIN_EXPONENT) { // normal single-precision to subnormal float8-precision representation int rshift = (FP8_MIN_EXPONENT - exp); if (rshift < FP32_NUM_BITS) { mantissa |= (1 << FP32_NUM_MANTISSA_BITS); sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0); mantissa = (mantissa >> rshift); u = (uint8_t(mantissa >> (FP32_NUM_MANTISSA_BITS- FP8_NUM_MANTISSA_BITS)) & FP8_MANTISSA_MASK); } else { mantissa = 0; u = 0; } // Exponent > FP8_MAX_EXPONENT - this is a special case done to match HW // 0x4380_0000 to 0x43e0_0000 - maps from 256 to 448, and does not saturate / inf. } else { if( exp == (FP8_MAX_EXPONENT + 1) ) { uint8_t mantissa_tmp = uint8_t(mantissa >> (FP32_NUM_MANTISSA_BITS - FP8_NUM_MANTISSA_BITS)); if( mantissa_tmp < FP8_MANTISSA_MASK) { exp = exp + FP8_EXPONENT_BIAS; u = uint8_t(uint32_t(exp) << FP8_NUM_MANTISSA_BITS) | mantissa_tmp; may_be_nan = (mantissa_tmp == (FP8_MANTISSA_MASK-1)); } else { // satfinite return (sign | FP8_MAX_FLT); } } else{ // satfinite return (sign | FP8_MAX_FLT); } } // round to nearest even int NUM_BITS_SHIFT = FP32_NUM_MANTISSA_BITS - (FP8_NUM_MANTISSA_BITS + 1); int round_bit = ((mantissa >> NUM_BITS_SHIFT) & 1); sticky_bit |= ((mantissa & ((1 << NUM_BITS_SHIFT) - 1)) != 0); if ((round_bit && sticky_bit) || (round_bit && (u & 1))) { u = uint8_t(u + 1); if( may_be_nan ) { skip_sign = true; } } if (u > FP8_MAX_FLT) { // satfinite u = (sign | FP8_MAX_FLT); } if( ! 
skip_sign ) { u |= sign; } return u; } /// Converts a fp8 value stored as a uint8_t to a float CUTLASS_HOST_DEVICE static float convert_fp8_to_float(uint8_t const& x) { uint32_t constexpr kF32_NaN = 0x7fffffff; uint8_t const &f8 = x; uint32_t sign = (f8 >> (FP8_NUM_BITS - 1)) & 1; uint32_t exp = (f8 >> FP8_NUM_MANTISSA_BITS) & FP8_EXPONENT_MASK; uint32_t mantissa = f8 & FP8_MANTISSA_MASK; unsigned f = (sign << (FP32_NUM_BITS-1)); if (IS_E4M3 && exp == 15 && mantissa == 0x7) { f = kF32_NaN; } else if (exp > 0 && (IS_E4M3 || exp < (FP8_MAX_EXPONENT + FP8_EXPONENT_BIAS + 1))) { // normal exp += (FP32_EXPONENT_BIAS - FP8_EXPONENT_BIAS); f = f | (exp << FP32_NUM_MANTISSA_BITS) | (mantissa << (FP32_NUM_MANTISSA_BITS-FP8_NUM_MANTISSA_BITS)); } else if (exp == 0) { if (mantissa) { // subnormal exp += (FP32_EXPONENT_BIAS - FP8_EXPONENT_BIAS) + 1; while ((mantissa & (1 << FP8_NUM_MANTISSA_BITS)) == 0) { mantissa <<= 1; exp--; } mantissa &= FP8_MANTISSA_MASK; f = f | (exp << FP32_NUM_MANTISSA_BITS) | (mantissa << (FP32_NUM_MANTISSA_BITS-FP8_NUM_MANTISSA_BITS)); } else { // sign-preserving zero } } else { if(mantissa == 0){ // Sign-preserving infinity f = (f | 0x7f800000); } else { // Canonical NaN f = kF32_NaN; } } #if defined(__CUDA_ARCH__) return reinterpret_cast<float const&>(f); #else float flt; std::memcpy(&flt, &f, sizeof(flt)); return flt; #endif } }; // Forward declaration of float_e5m2_t to define float_e4m3_t <=> float_e5m2_t // conversions in class float_e4m3_t struct float_e5m2_t; /////////////////////////////////////////////////////////////// /// /// floating-point 8 type : E4M3 /// /////////////////////////////////////////////////////////////// struct alignas(1) float_e4m3_t : float8_base<FloatEncoding::E4M3> { using Base = float8_base<FloatEncoding::E4M3>; static constexpr int MAX_EXPONENT = Base::FP8_MAX_EXPONENT; // // Static conversion operators // /// Constructs from an uint8_t CUTLASS_HOST_DEVICE static float_e4m3_t bitcast(uint8_t x) { float_e4m3_t f; f.storage = x; return f; } /// FP32 -> FP8 conversion - rounds to nearest even CUTLASS_HOST_DEVICE static float_e4m3_t from_float(float const& flt) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t tmp; float y = float(); asm volatile("cvt.rn.satfinite.e4m3x2.f32 %0, %1, %2;" : "=h"(tmp) : "f"(y), "f"(flt)); return *reinterpret_cast<float_e4m3_t *>(&tmp); #else return bitcast(Base::convert_float_to_fp8(flt)); #endif } /// FP16 -> E5M2 conversion - rounds to nearest even CUTLASS_HOST_DEVICE static float_e4m3_t from_half(half const& flt) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t tmp = 0; uint32_t bits = reinterpret_cast<uint16_t const &>(flt); asm volatile("cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;" : "=h"(tmp) : "r"(bits)); return *reinterpret_cast<float_e4m3_t *>(&tmp); #else return bitcast(Base::convert_float_to_fp8(__half2float(flt))); #endif } // E4M3 -> half CUTLASS_HOST_DEVICE static half to_half(float_e4m3_t const& x) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t bits = x.storage; uint32_t packed; asm volatile("cvt.rn.f16x2.e4m3x2 %0, %1;\n" : "=r"(packed) : "h"(bits)); return reinterpret_cast<half2 const &>(packed).x; #else return __float2half(Base::convert_fp8_to_float(x.storage)); #endif } // E4M3 -> Float CUTLASS_HOST_DEVICE static float to_float(float_e4m3_t const& x) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t bits = x.storage; uint32_t packed; asm volatile("cvt.rn.f16x2.e4m3x2 %0, %1;\n" : "=r"(packed) : "h"(bits)); return __half2float(reinterpret_cast<half2 const &>(packed).x); #else return 
Base::convert_fp8_to_float(x.storage); #endif } // // Methods // /// Constructor inheritance using Base::Base; /// Default constructor float_e4m3_t() = default; #ifdef CUDA_FP8_ENABLED /// Conversion from CUDA's FP8 type CUTLASS_HOST_DEVICE explicit float_e4m3_t(__nv_fp8_e4m3 x) { storage = x.__x; } #endif /// Floating point conversion CUTLASS_HOST_DEVICE explicit float_e4m3_t(float x) { storage = from_float(x).storage; } CUTLASS_HOST_DEVICE explicit float_e4m3_t(half x) { storage = from_half(x).storage; } /// Floating point conversion CUTLASS_HOST_DEVICE explicit float_e4m3_t(double x): float_e4m3_t(float(x)) { } /// Integer conversion CUTLASS_HOST_DEVICE explicit float_e4m3_t(int x): float_e4m3_t(float(x)) { } CUTLASS_HOST_DEVICE explicit float_e4m3_t(unsigned x): float_e4m3_t(float(x)) { } /// E5M2 conversion. Defined after float_e5m2_t is defined. CUTLASS_HOST_DEVICE explicit float_e4m3_t(float_e5m2_t x); #ifdef CUDA_FP8_ENABLED /// Assignment from CUDA's FP8 type CUTLASS_HOST_DEVICE float_e4m3_t & operator=(__nv_fp8_e4m3 x) { storage = x.__x; return *this; } #endif /// Converts to float CUTLASS_HOST_DEVICE operator float() const { return to_float(*this); } /// Converts to half CUTLASS_HOST_DEVICE operator half() const { return to_half(*this); } /// Converts to float CUTLASS_HOST_DEVICE explicit operator double() const { return double(to_float(*this)); } /// Converts to int CUTLASS_HOST_DEVICE explicit operator int() const { #if defined(__CUDA_ARCH__) return __half2int_rn(to_half(*this)); #else return int(to_float(*this)); #endif } /// Casts to bool CUTLASS_HOST_DEVICE explicit operator bool() const { #if defined(__CUDA_ARCH__) return bool(__half2int_rn(to_half(*this))); #else return bool(int(to_float(*this))); #endif } /// Accesses raw internal state CUTLASS_HOST_DEVICE uint8_t& raw() { return storage; } /// Accesses raw internal state CUTLASS_HOST_DEVICE uint8_t raw() const { return storage; } /// Returns the sign bit CUTLASS_HOST_DEVICE bool signbit() const { return ((storage & (1 << (Base::FP8_NUM_BITS - 1))) != 0); } /// Returns the biased exponent CUTLASS_HOST_DEVICE int exponent_biased() const { return int((storage >> FP8_NUM_MANTISSA_BITS) & Base::FP8_EXPONENT_MASK); } /// Returns the unbiased exponent CUTLASS_HOST_DEVICE int exponent() const { return exponent_biased() - 15; } /// Returns the mantissa CUTLASS_HOST_DEVICE int mantissa() const { return int(storage & Base::FP8_MANTISSA_MASK); } }; /////////////////////////////////////////////////////////////// /// /// floating-point 8 type : E5M2 /// /////////////////////////////////////////////////////////////// struct alignas(1) float_e5m2_t : float8_base<FloatEncoding::E5M2> { using Base = float8_base<FloatEncoding::E5M2>; static constexpr int MAX_EXPONENT = Base::FP8_MAX_EXPONENT; // // Static conversion operators // /// Constructs from an uint8_t CUTLASS_HOST_DEVICE static float_e5m2_t bitcast(uint8_t x) { float_e5m2_t f; f.storage = x; return f; } /// FP32 -> FP8 conversion - rounds to nearest even CUTLASS_HOST_DEVICE static float_e5m2_t from_float(float const& flt) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t tmp; float y = float(); asm volatile("cvt.rn.satfinite.e5m2x2.f32 %0, %1, %2;" : "=h"(tmp) : "f"(y), "f"(flt)); return *reinterpret_cast<float_e5m2_t *>(&tmp); #else return bitcast(Base::convert_float_to_fp8(flt)); #endif } /// FP16 -> E5M2 conversion - rounds to nearest even CUTLASS_HOST_DEVICE static float_e5m2_t from_half(half const& flt) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t tmp = 0; uint32_t bits = 
reinterpret_cast<uint16_t const &>(flt); asm volatile("cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;" : "=h"(tmp) : "r"(bits)); return *reinterpret_cast<float_e5m2_t *>(&tmp); #else return bitcast(Base::convert_float_to_fp8(__half2float(flt))); #endif } // E5M2 -> half CUTLASS_HOST_DEVICE static half to_half(float_e5m2_t const& x) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t bits = x.storage; uint32_t packed; asm volatile("cvt.rn.f16x2.e5m2x2 %0, %1;\n" : "=r"(packed) : "h"(bits)); return reinterpret_cast<half2 const &>(packed).x; #else return __float2half(Base::convert_fp8_to_float(x.storage)); #endif } // E5M2 -> Float CUTLASS_HOST_DEVICE static float to_float(float_e5m2_t const& x) { #if defined(CUDA_PTX_FP8_CVT_ENABLED) uint16_t bits = x.storage; uint32_t packed; asm volatile("cvt.rn.f16x2.e5m2x2 %0, %1;\n" : "=r"(packed) : "h"(bits)); return __half2float(reinterpret_cast<half2 const &>(packed).x); #else return Base::convert_fp8_to_float(x.storage); #endif } // // Methods // /// Constructor inheritance using Base::Base; /// Default constructor float_e5m2_t() = default; #ifdef CUDA_FP8_ENABLED /// Conversion from CUDA's FP8 type CUTLASS_HOST_DEVICE explicit float_e5m2_t(__nv_fp8_e5m2 x) { storage = x.__x; } #endif /// Floating point conversion CUTLASS_HOST_DEVICE explicit float_e5m2_t(float x) { storage = from_float(x).storage; } CUTLASS_HOST_DEVICE explicit float_e5m2_t(half x) { storage = from_half(x).storage; } /// Floating point conversion CUTLASS_HOST_DEVICE explicit float_e5m2_t(double x): float_e5m2_t(float(x)) { } /// Integer conversion CUTLASS_HOST_DEVICE explicit float_e5m2_t(int x): float_e5m2_t(float(x)) { } CUTLASS_HOST_DEVICE explicit float_e5m2_t(unsigned x): float_e5m2_t(float(x)) { } /// E4M3 conversion CUTLASS_HOST_DEVICE explicit float_e5m2_t(float_e4m3_t x); #ifdef CUDA_FP8_ENABLED /// Assignment from CUDA's FP8 type CUTLASS_HOST_DEVICE float_e5m2_t & operator=(__nv_fp8_e5m2 x) { storage = x.__x; return *this; } #endif /// Converts to float CUTLASS_HOST_DEVICE operator float() const { return to_float(*this); } /// Converts to half CUTLASS_HOST_DEVICE operator half() const { return to_half(*this); } /// Converts to float CUTLASS_HOST_DEVICE explicit operator double() const { return double(to_float(*this)); } /// Converts to int CUTLASS_HOST_DEVICE explicit operator int() const { #if defined(__CUDA_ARCH__) return __half2int_rn(to_half(*this)); #else return int(to_float(*this)); #endif } /// Casts to bool CUTLASS_HOST_DEVICE explicit operator bool() const { #if defined(__CUDA_ARCH__) return bool(__half2int_rn(to_half(*this))); #else return bool(int(to_float(*this))); #endif } /// Accesses raw internal state CUTLASS_HOST_DEVICE uint8_t& raw() { return storage; } /// Accesses raw internal state CUTLASS_HOST_DEVICE uint8_t raw() const { return storage; } /// Returns the sign bit CUTLASS_HOST_DEVICE bool signbit() const { return ((storage & (1 << (Base::FP8_NUM_BITS - 1))) != 0); } /// Returns the biased exponent CUTLASS_HOST_DEVICE int exponent_biased() const { return int((storage >> FP8_NUM_MANTISSA_BITS) & Base::FP8_EXPONENT_MASK); } /// Returns the unbiased exponent CUTLASS_HOST_DEVICE int exponent() const { return exponent_biased() - 15; } /// Returns the mantissa CUTLASS_HOST_DEVICE int mantissa() const { return int(storage & Base::FP8_MANTISSA_MASK); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // Arithmetic operators // 
/////////////////////////////////////////////////////////////////////////////////////////////////// CUTLASS_HOST_DEVICE bool operator==(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float(lhs) == float(rhs); } CUTLASS_HOST_DEVICE bool operator!=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float(lhs) != float(rhs); } CUTLASS_HOST_DEVICE bool operator<(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float(lhs) < float(rhs); } CUTLASS_HOST_DEVICE bool operator<=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float(lhs) <= float(rhs); } CUTLASS_HOST_DEVICE bool operator>(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float(lhs) > float(rhs); } CUTLASS_HOST_DEVICE bool operator>=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float(lhs) >= float(rhs); } CUTLASS_HOST_DEVICE float_e4m3_t operator+(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float_e4m3_t(float(lhs) + float(rhs)); } CUTLASS_HOST_DEVICE float_e4m3_t operator-(float_e4m3_t const& lhs) { return float_e4m3_t(-float(lhs)); } CUTLASS_HOST_DEVICE float_e4m3_t operator-(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float_e4m3_t(float(lhs) - float(rhs)); } CUTLASS_HOST_DEVICE float_e4m3_t operator*(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float_e4m3_t(float(lhs) * float(rhs)); } CUTLASS_HOST_DEVICE float_e4m3_t operator/(float_e4m3_t const& lhs, float_e4m3_t const& rhs) { return float_e4m3_t(float(lhs) / float(rhs)); } CUTLASS_HOST_DEVICE float_e4m3_t& operator+=(float_e4m3_t & lhs, float_e4m3_t const& rhs) { lhs = float_e4m3_t(float(lhs) + float(rhs)); return lhs; } CUTLASS_HOST_DEVICE float_e4m3_t& operator-=(float_e4m3_t & lhs, float_e4m3_t const& rhs) { lhs = float_e4m3_t(float(lhs) - float(rhs)); return lhs; } CUTLASS_HOST_DEVICE float_e4m3_t& operator*=(float_e4m3_t & lhs, float_e4m3_t const& rhs) { lhs = float_e4m3_t(float(lhs) * float(rhs)); return lhs; } CUTLASS_HOST_DEVICE float_e4m3_t& operator/=(float_e4m3_t & lhs, float_e4m3_t const& rhs) { lhs = float_e4m3_t(float(lhs) / float(rhs)); return lhs; } CUTLASS_HOST_DEVICE float_e4m3_t& operator++(float_e4m3_t & lhs) { float tmp(lhs); ++tmp; lhs = float_e4m3_t(tmp); return lhs; } CUTLASS_HOST_DEVICE float_e4m3_t& operator--(float_e4m3_t & lhs) { float tmp(lhs); --tmp; lhs = float_e4m3_t(tmp); return lhs; } CUTLASS_HOST_DEVICE float_e4m3_t operator++(float_e4m3_t & lhs, int) { float_e4m3_t ret(lhs); float tmp(lhs); tmp++; lhs = float_e4m3_t(tmp); return ret; } CUTLASS_HOST_DEVICE float_e4m3_t operator--(float_e4m3_t & lhs, int) { float_e4m3_t ret(lhs); float tmp(lhs); tmp--; lhs = float_e4m3_t(tmp); return ret; } CUTLASS_HOST_DEVICE bool operator==(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float(lhs) == float(rhs); } CUTLASS_HOST_DEVICE bool operator!=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float(lhs) != float(rhs); } CUTLASS_HOST_DEVICE bool operator<(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float(lhs) < float(rhs); } CUTLASS_HOST_DEVICE bool operator<=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float(lhs) <= float(rhs); } CUTLASS_HOST_DEVICE bool operator>(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float(lhs) > float(rhs); } CUTLASS_HOST_DEVICE bool operator>=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float(lhs) >= float(rhs); } CUTLASS_HOST_DEVICE float_e5m2_t operator+(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float_e5m2_t(float(lhs) + 
float(rhs)); } CUTLASS_HOST_DEVICE float_e5m2_t operator-(float_e5m2_t const& lhs) { return float_e5m2_t(-float(lhs)); } CUTLASS_HOST_DEVICE float_e5m2_t operator-(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float_e5m2_t(float(lhs) - float(rhs)); } CUTLASS_HOST_DEVICE float_e5m2_t operator*(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float_e5m2_t(float(lhs) * float(rhs)); } CUTLASS_HOST_DEVICE float_e5m2_t operator/(float_e5m2_t const& lhs, float_e5m2_t const& rhs) { return float_e5m2_t(float(lhs) / float(rhs)); } CUTLASS_HOST_DEVICE float_e5m2_t& operator+=(float_e5m2_t & lhs, float_e5m2_t const& rhs) { lhs = float_e5m2_t(float(lhs) + float(rhs)); return lhs; } CUTLASS_HOST_DEVICE float_e5m2_t& operator-=(float_e5m2_t & lhs, float_e5m2_t const& rhs) { lhs = float_e5m2_t(float(lhs) - float(rhs)); return lhs; } CUTLASS_HOST_DEVICE float_e5m2_t& operator*=(float_e5m2_t & lhs, float_e5m2_t const& rhs) { lhs = float_e5m2_t(float(lhs) * float(rhs)); return lhs; } CUTLASS_HOST_DEVICE float_e5m2_t& operator/=(float_e5m2_t & lhs, float_e5m2_t const& rhs) { lhs = float_e5m2_t(float(lhs) / float(rhs)); return lhs; } CUTLASS_HOST_DEVICE float_e5m2_t& operator++(float_e5m2_t & lhs) { float tmp(lhs); ++tmp; lhs = float_e5m2_t(tmp); return lhs; } CUTLASS_HOST_DEVICE float_e5m2_t& operator--(float_e5m2_t & lhs) { float tmp(lhs); --tmp; lhs = float_e5m2_t(tmp); return lhs; } CUTLASS_HOST_DEVICE float_e5m2_t operator++(float_e5m2_t & lhs, int) { float_e5m2_t ret(lhs); float tmp(lhs); tmp++; lhs = float_e5m2_t(tmp); return ret; } CUTLASS_HOST_DEVICE float_e5m2_t operator--(float_e5m2_t & lhs, int) { float_e5m2_t ret(lhs); float tmp(lhs); tmp--; lhs = float_e5m2_t(tmp); return ret; } /////////////////////////////////////////////////////////////////////////////////////////////////// // // float_e4m3_t <=> float_e5m2_t conversions // /////////////////////////////////////////////////////////////////////////////////////////////////// /// float_e4m3_t <= float_e5m2_t CUTLASS_HOST_DEVICE float_e4m3_t::float_e4m3_t(float_e5m2_t x) { storage = from_float(float_e5m2_t::to_float(x)).storage; } /// float_e5m2_t <= float_e4m3_t CUTLASS_HOST_DEVICE float_e5m2_t::float_e5m2_t(float_e4m3_t x) { storage = from_float(float_e4m3_t::to_float(x)).storage; } /////////////////////////////////////////////////////////////// /// /// Umbrella floating-point 8-bit data type : type_erased_dynamic_float8_t /// This umbrella datatype can be enabled when a user provides a specific /// datatype in runtime argument list. 
/// /// Currently supported runtime datatypes compatible with type_erased_dynamic_float8_t: /// QMMAFormat::E5M2 /// QMMAFormat::E4M3 /// /////////////////////////////////////////////////////////////// union type_erased_dynamic_float8_t { uint8_t data; cutlass::float_e5m2_t e5m2; cutlass::float_e4m3_t e4m3; CUTLASS_HOST_DEVICE explicit operator cutlass::float_e5m2_t() const { return e5m2; } CUTLASS_HOST_DEVICE explicit operator cutlass::float_e4m3_t() const { return e4m3; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // // Standard Library operations and definitions // /////////////////////////////////////////////////////////////////////////////////////////////////// #if !defined(__CUDACC_RTC__) namespace std { /// Numeric limits common to all float8 types template <typename T> struct float8_base_numeric_limits { private: using F8Type = T; public: static bool const is_specialized = true; static bool const is_signed = true; static bool const is_integer = false; static bool const is_exact = false; static bool const has_quiet_NaN = true; static bool const has_signaling_NaN = false; static std::float_denorm_style const has_denorm = std::denorm_present; static bool const has_denorm_loss = true; static std::float_round_style const round_style = std::round_to_nearest; static bool const is_iec559 = false; static bool const is_bounded = true; static bool const is_modulo = false; static int const digits = F8Type::FP8_NUM_MANTISSA_BITS; /// Least positive value CUTLASS_HOST_DEVICE static F8Type min() { return F8Type::bitcast(0x01); } /// Maximum finite value CUTLASS_HOST_DEVICE static F8Type max() { return F8Type::bitcast(F8Type::FP8_MAX_FLT); } /// Returns maximum rounding error CUTLASS_HOST_DEVICE static F8Type round_error() { return F8Type(0.5f); } /// Returns positive infinity value CUTLASS_HOST_DEVICE static F8Type infinity() { return F8Type::bitcast(F8Type::FP8_INFINITY_MASK); } /// Returns quiet NaN value CUTLASS_HOST_DEVICE static F8Type quiet_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); } /// Returns signaling NaN value CUTLASS_HOST_DEVICE static F8Type signaling_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); } /// Returns smallest positive subnormal value CUTLASS_HOST_DEVICE static F8Type denorm_min() { return F8Type::bitcast(0x01); } }; /// Numeric limits for float_e4m3_t template <> struct numeric_limits<cutlass::float_e4m3_t> : public float8_base_numeric_limits<cutlass::float_e4m3_t> { static bool const has_infinity = false; /// Minimum finite value static cutlass::float_e4m3_t lowest() { return cutlass::float_e4m3_t::bitcast(0xfe); } /// Machine epsilon, that is, the difference between 1.0 and the next representable value static cutlass::float_e4m3_t epsilon() { return cutlass::float_e4m3_t::bitcast(0x20); } }; /// Numeric limits for float_e5m2_t template <> struct numeric_limits<cutlass::float_e5m2_t> : public float8_base_numeric_limits<cutlass::float_e5m2_t> { static bool const has_infinity = true; /// Minimum finite value static cutlass::float_e5m2_t lowest() { return cutlass::float_e5m2_t::bitcast(0xfb); } /// Machine epsilon, that is, the difference between 1.0 and the next representable value static cutlass::float_e5m2_t epsilon() { return cutlass::float_e5m2_t::bitcast(0x34); } }; } // namespace std #endif namespace platform { /// Numeric limits common to all float8 types template <typename T> 
struct float8_base_numeric_limits { private: using F8Type = T; public: static bool const is_specialized = true; static bool const is_signed = true; static bool const is_integer = false; static bool const is_exact = false; static bool const has_quiet_NaN = true; static bool const has_signaling_NaN = false; #if !defined(__CUDACC_RTC__) static std::float_denorm_style const has_denorm = std::denorm_present; #endif static bool const has_denorm_loss = true; #if !defined(__CUDACC_RTC__) static std::float_round_style const round_style = std::round_to_nearest; #endif static bool const is_iec559 = false; static bool const is_bounded = true; static bool const is_modulo = false; static int const digits = F8Type::FP8_NUM_MANTISSA_BITS; /// Least positive value CUTLASS_HOST_DEVICE static F8Type min() { return F8Type::bitcast(0x01); } /// Maximum finite value CUTLASS_HOST_DEVICE static F8Type max() { return F8Type::bitcast(F8Type::FP8_MAX_FLT); } /// Returns maximum rounding error CUTLASS_HOST_DEVICE static F8Type round_error() { return F8Type(0.5f); } /// Returns positive infinity value CUTLASS_HOST_DEVICE static F8Type infinity() { return F8Type::bitcast(F8Type::FP8_INFINITY_MASK); } /// Returns quiet NaN value CUTLASS_HOST_DEVICE static F8Type quiet_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); } /// Returns signaling NaN value CUTLASS_HOST_DEVICE static F8Type signaling_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); } /// Returns smallest positive subnormal value CUTLASS_HOST_DEVICE static F8Type denorm_min() { return F8Type::bitcast(0x01); } }; /// std::numeric_limits template <class T> struct numeric_limits; /// Numeric limits for float_e4m3_t template <> struct numeric_limits<cutlass::float_e4m3_t> : public float8_base_numeric_limits<cutlass::float_e4m3_t> { static bool const has_infinity = false; /// Minimum finite value static cutlass::float_e4m3_t lowest() { return cutlass::float_e4m3_t::bitcast(0xfe); } /// Machine epsilon, that is, the difference between 1.0 and the next representable value static cutlass::float_e4m3_t epsilon() { return cutlass::float_e4m3_t::bitcast(0x20); } }; /// Numeric limits for float_e5m2_t template <> struct numeric_limits<cutlass::float_e5m2_t> : public float8_base_numeric_limits<cutlass::float_e5m2_t> { static bool const has_infinity = true; /// Minimum finite value static cutlass::float_e5m2_t lowest() { return cutlass::float_e5m2_t::bitcast(0xfb); } /// Machine epsilon, that is, the difference between 1.0 and the next representable value static cutlass::float_e5m2_t epsilon() { return cutlass::float_e5m2_t::bitcast(0x34); } }; } // namespace platform /////////////////////////////////////////////////////////////////////////////////////////////////// // // User-defined literals // CUTLASS_HOST_DEVICE cutlass::float_e4m3_t operator "" _fe4m3(long double x) { return cutlass::float_e4m3_t(float(x)); } CUTLASS_HOST_DEVICE cutlass::float_e4m3_t operator "" _fe4m3(unsigned long long int x) { return cutlass::float_e4m3_t(int(x)); } CUTLASS_HOST_DEVICE cutlass::float_e5m2_t operator "" _fe5m2(long double x) { return cutlass::float_e5m2_t(float(x)); } CUTLASS_HOST_DEVICE cutlass::float_e5m2_t operator "" _fe5m2(unsigned long long int x) { return cutlass::float_e5m2_t(int(x)); } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/float8.h/0
{ "file_path": "cutlass/include/cutlass/float8.h", "repo_id": "cutlass", "token_count": 16661 }
35
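Because every conversion above has a software fallback, the float8 types can be exercised from ordinary host code. The short program below is a usage sketch, assuming the CUTLASS include/ directory and the CUDA toolkit headers (for cuda_fp16.h) are on the include path; it demonstrates explicit construction from float, saturating conversion of out-of-range values, E5M2 to E4M3 conversion, arithmetic that round-trips through float, the _fe4m3 literal, and the std::numeric_limits specializations defined in this header.

// Host-side sketch exercising cutlass::float_e4m3_t and cutlass::float_e5m2_t.
#include <iostream>
#include <limits>
#include "cutlass/float8.h"

int main() {
  cutlass::float_e4m3_t a(1.5f);      // explicit float -> E4M3 conversion
  cutlass::float_e5m2_t b(-0.375f);   // explicit float -> E5M2 conversion

  // Round-trip through float; E4M3 keeps 3 mantissa bits, E5M2 keeps 2.
  std::cout << "a = " << float(a) << ", b = " << float(b) << "\n";

  // Values above the E4M3 range saturate to the maximum finite value (448).
  cutlass::float_e4m3_t sat(1000.0f);
  std::cout << "sat = " << float(sat) << "\n";

  // Cross-format conversion and arithmetic (computed in float, then re-quantized).
  cutlass::float_e4m3_t c(b);
  cutlass::float_e4m3_t d = a * c + cutlass::float_e4m3_t(0.25f);
  std::cout << "d = " << float(d) << "\n";

  // User-defined literal and numeric_limits specializations from this header.
  auto e = 2.0_fe4m3;
  std::cout << "e = " << float(e)
            << ", max e4m3 = " << float(std::numeric_limits<cutlass::float_e4m3_t>::max())
            << ", eps e5m2 = " << float(std::numeric_limits<cutlass::float_e5m2_t>::epsilon())
            << "\n";
  return 0;
}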
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and batched array variants. */ #pragma once // common #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/gemm.h" #include "cutlass/detail/layout.hpp" #include "cutlass/detail/mma.hpp" #include "cutlass/cuda_host_adapter.hpp" #if !defined(__CUDACC_RTC__) #include "cutlass/cluster_launch.hpp" #include "cutlass/trace.h" #endif // !defined(__CUDACC_RTC__) // 2.x #include "cutlass/gemm/device/gemm_universal_base.h" #include "cutlass/gemm/kernel/gemm_transpose_operands.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h" // 3.x #include "cutlass/gemm/kernel/gemm_universal.hpp" //////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::device { //////////////////////////////////////////////////////////////////////////////// /*! GemmUniversalAdapter is a stateful, reusable GEMM handle built around a kernel of type cutlass::gemm::kernel::Gemm or cutlass::gemm::kernel::GemmUniversal. It manages the lifetime of the underlying `kernel::Params` struct, and exposes APIs to create it from the host facing arguments. For power users, new static methods are exposed in 3.x APIs that bypass the stateful methods or args->params lowering. It supports kernel types that implement both the 2.x and 3.0 APIs, however, this is done by specializing the implementation of GemmUniversalAdapter on the two kernel API types, and thus, GemmUniversalAdapter's behaviour might differ between the two specializations. 
*/ template <class GemmKernel_, class Enable = void> class GemmUniversalAdapter; //////////////////////////////////////////////////////////////////////////////// ////////////////////////////// CUTLASS 3.x API ///////////////////////////////// //////////////////////////////////////////////////////////////////////////////// template <class GemmKernel_> class GemmUniversalAdapter< GemmKernel_, cute::enable_if_t<gemm::detail::IsCutlass3GemmKernel<GemmKernel_>::value>> { public: using GemmKernel = GemmKernel_; using TileShape = typename GemmKernel::TileShape; using ElementA = typename GemmKernel::ElementA; using ElementB = typename GemmKernel::ElementB; using ElementC = typename GemmKernel::ElementC; using ElementD = typename GemmKernel::ElementD; using ElementAccumulator = typename GemmKernel::ElementAccumulator; using DispatchPolicy = typename GemmKernel::DispatchPolicy; using CollectiveMainloop = typename GemmKernel::CollectiveMainloop; using CollectiveEpilogue = typename GemmKernel::CollectiveEpilogue; // Map back to 2.x type as best as possible using LayoutA = gemm::detail::StrideToLayoutTagA_t<typename GemmKernel::StrideA>; using LayoutB = gemm::detail::StrideToLayoutTagB_t<typename GemmKernel::StrideB>; using LayoutC = gemm::detail::StrideToLayoutTagC_t<typename GemmKernel::StrideC>; using LayoutD = gemm::detail::StrideToLayoutTagC_t<typename GemmKernel::StrideD>; static bool const kEnableCudaHostAdapter = CUTLASS_ENABLE_CUDA_HOST_ADAPTER; static ComplexTransform const kTransformA = cute::is_same_v<typename GemmKernel::CollectiveMainloop::TransformA, cute::conjugate> ? ComplexTransform::kConjugate : ComplexTransform::kNone; static ComplexTransform const kTransformB = cute::is_same_v<typename GemmKernel::CollectiveMainloop::TransformB, cute::conjugate> ? 
ComplexTransform::kConjugate : ComplexTransform::kNone; // Legacy: Assume MultiplyAdd only since we do not use this tag type in 3.0 using MathOperator = cutlass::arch::OpMultiplyAdd; using OperatorClass = cutlass::detail::get_operator_class_t<typename CollectiveMainloop::TiledMma>; using ArchTag = typename GemmKernel::ArchTag; // NOTE: Assume identity swizzle for now using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Assume TiledMma's ShapeMNK is the same as 2.x's ThreadblockShape using ThreadblockShape = cutlass::gemm::GemmShape< cute::size<0>(TileShape{}), cute::size<1>(TileShape{}), cute::size<2>(TileShape{})>; using ClusterShape = cutlass::gemm::GemmShape< cute::size<0>(typename GemmKernel::DispatchPolicy::ClusterShape{}), cute::size<1>(typename GemmKernel::DispatchPolicy::ClusterShape{}), cute::size<2>(typename GemmKernel::DispatchPolicy::ClusterShape{})>; // Instruction shape is easy too, since we get that directly from our TiledMma's atom shape using InstructionShape = cutlass::gemm::GemmShape< cute::size<0>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}), cute::size<1>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}), cute::size<2>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{})>; // Legacy: provide a correct warp count, but no reliable warp shape static int const kThreadCount = GemmKernel::MaxThreadsPerBlock; // Warp shape is not a primary API type in 3.x // But we can best approximate it by inspecting the TiledMma // For this, we make the assumption that we always have 4 warps along M, and rest along N, none along K // We also always round up the warp count to 4 if the tiled mma is smaller than 128 threads static constexpr int WarpsInMma = cute::max(4, CUTE_STATIC_V(cute::size(typename GemmKernel::TiledMma{})) / 32); static constexpr int WarpsInMmaM = 4; static constexpr int WarpsInMmaN = cute::ceil_div(WarpsInMma, WarpsInMmaM); using WarpCount = cutlass::gemm::GemmShape<WarpsInMmaM, WarpsInMmaN, 1>; using WarpShape = cutlass::gemm::GemmShape< CUTE_STATIC_V(cute::tile_size<0>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaM, CUTE_STATIC_V(cute::tile_size<1>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaN, CUTE_STATIC_V(cute::tile_size<2>(typename CollectiveMainloop::TiledMma{}))>; static int constexpr kStages = CollectiveMainloop::DispatchPolicy::Stages; // Inspect TiledCopy for A and B to compute the alignment size static int constexpr kAlignmentA = cutlass::detail::get_alignment_count_from_gmem_tiled_copy< typename CollectiveMainloop::GmemTiledCopyA, ElementA, typename CollectiveMainloop::TiledMma::ValTypeA>(); static int constexpr kAlignmentB = cutlass::detail::get_alignment_count_from_gmem_tiled_copy< typename CollectiveMainloop::GmemTiledCopyB, ElementB, typename CollectiveMainloop::TiledMma::ValTypeB>(); static int constexpr kAlignmentC = cutlass::detail::get_alignment_count_from_gmem_tiled_copy< typename CollectiveEpilogue::GmemTiledCopyC, ElementC>(); static int constexpr kAlignmentD = cutlass::detail::get_alignment_count_from_gmem_tiled_copy< typename CollectiveEpilogue::GmemTiledCopyD, ElementD>(); using EpilogueOutputOp = typename CollectiveEpilogue::ThreadEpilogueOp; // Split-K preserves splits that are 128b aligned static int constexpr kSplitKAlignment = cute::max( 128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value); /// Argument structure: User API using Arguments = typename GemmKernel::Arguments; /// Argument structure: Kernel API using Params = typename 
GemmKernel::Params; private: /// Kernel API parameters object Params params_; public: /// Access the Params structure Params const& params() const { return params_; } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const& args) { if (GemmKernel::can_implement(args)) { return Status::kSuccess; } else { return Status::kInvalid; } } /// Gets the workspace size static size_t get_workspace_size(Arguments const& args) { size_t workspace_bytes = 0; if (args.mode == GemmUniversalMode::kGemmSplitKParallel) { workspace_bytes += sizeof(int) * size_t(cute::size<0>(TileShape{})) * size_t(cute::size<1>(TileShape{})); } CUTLASS_TRACE_HOST(" workspace_bytes: " << workspace_bytes); workspace_bytes += GemmKernel::get_workspace_size(args); return workspace_bytes; } /// Computes the grid shape static dim3 get_grid_shape(Arguments const& args, void* workspace = nullptr) { auto tmp_params = GemmKernel::to_underlying_arguments(args, workspace); return GemmKernel::get_grid_shape(tmp_params); } /// Computes the grid shape static dim3 get_grid_shape(Params const& params) { return GemmKernel::get_grid_shape(params); } /// Computes the maximum number of active blocks per multiprocessor static int maximum_active_blocks(int /* smem_capacity */ = -1) { CUTLASS_TRACE_HOST("GemmUniversal::maximum_active_blocks()"); int max_active_blocks = -1; int smem_size = GemmKernel::SharedStorageSize; // first, account for dynamic smem capacity if needed cudaError_t result; if (smem_size >= (48 << 10)) { CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size); result = cudaFuncSetAttribute( device_kernel<GemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (cudaSuccess != result) { result = cudaGetLastError(); // to clear the error bit CUTLASS_TRACE_HOST( " cudaFuncSetAttribute() returned error: " << cudaGetErrorString(result)); return -1; } } // query occupancy after setting smem size result = cudaOccupancyMaxActiveBlocksPerMultiprocessor( &max_active_blocks, device_kernel<GemmKernel>, GemmKernel::MaxThreadsPerBlock, smem_size); if (cudaSuccess != result) { result = cudaGetLastError(); // to clear the error bit CUTLASS_TRACE_HOST( " cudaOccupancyMaxActiveBlocksPerMultiprocessor() returned error: " << cudaGetErrorString(result)); return -1; } CUTLASS_TRACE_HOST(" max_active_blocks: " << max_active_blocks); return max_active_blocks; } /// Initializes GEMM state from arguments. Status initialize( Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter* cuda_adapter = nullptr) { CUTLASS_TRACE_HOST("GemmUniversal::initialize() - workspace " << workspace << ", stream: " << (stream ? "non-null" : "null")); // Initialize the workspace Status status = GemmKernel::initialize_workspace(args, workspace, stream, cuda_adapter); if (status != Status::kSuccess) { return status; } // Initialize the Params structure params_ = GemmKernel::to_underlying_arguments(args, workspace); // Don't set the function attributes - require the CudaHostAdapter to set it. 
if constexpr (kEnableCudaHostAdapter) { CUTLASS_ASSERT(cuda_adapter); return Status::kSuccess; } else { // // Account for dynamic smem capacity if needed // int smem_size = GemmKernel::SharedStorageSize; CUTLASS_ASSERT(cuda_adapter == nullptr); if (smem_size >= (48 << 10)) { CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size); cudaError_t result = cudaFuncSetAttribute( device_kernel<GemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (cudaSuccess != result) { result = cudaGetLastError(); // to clear the error bit CUTLASS_TRACE_HOST(" cudaFuncSetAttribute() returned error: " << cudaGetErrorString(result)); return Status::kErrorInternal; } } } return Status::kSuccess; } /// Update API is preserved in 3.0, but does not guarantee a lightweight update of params. Status update(Arguments const& args, void* workspace = nullptr) { CUTLASS_TRACE_HOST("GemmUniversal()::update() - workspace: " << workspace); size_t workspace_bytes = get_workspace_size(args); if (workspace_bytes > 0 && nullptr == workspace) { return Status::kErrorWorkspaceNull; } params_ = GemmKernel::to_underlying_arguments(args, workspace); return Status::kSuccess; } /// Primary run() entry point API that is static allowing users to create and manage their own params. /// Supplied params struct must be construct by calling GemmKernel::to_underling_arguments() static Status run(Params& params, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { CUTLASS_TRACE_HOST("GemmUniversal::run()"); dim3 const block = GemmKernel::get_block_shape(); dim3 const grid = get_grid_shape(params); // configure smem size and carveout int smem_size = GemmKernel::SharedStorageSize; Status launch_result{ Status::kSuccess }; // Use extended launch API only for mainloops that use it if constexpr (GemmKernel::ArchTag::kMinComputeCapability >= 90) { dim3 cluster(cute::size<0>(typename GemmKernel::DispatchPolicy::ClusterShape{}), cute::size<1>(typename GemmKernel::DispatchPolicy::ClusterShape{}), cute::size<2>(typename GemmKernel::DispatchPolicy::ClusterShape{})); void* kernel_params[] = {&params}; if constexpr (kEnableCudaHostAdapter) { // // Use the cuda host adapter // CUTLASS_ASSERT(cuda_adapter); if (cuda_adapter) { launch_result = cuda_adapter->launch(grid, cluster, block, smem_size, stream, kernel_params, 0); } else { return Status::kErrorInternal; } } else { CUTLASS_ASSERT(cuda_adapter == nullptr); void const* kernel = (void const*) device_kernel<GemmKernel>; if constexpr (GemmKernel::ArchTag::kMinComputeCapability == 90) { launch_result = ClusterLauncher::launch( grid, cluster, block, smem_size, stream, kernel, kernel_params); } } } else { launch_result = Status::kSuccess; if constexpr (kEnableCudaHostAdapter) { CUTLASS_ASSERT(cuda_adapter); if (cuda_adapter) { void* kernel_params[] = {&params}; launch_result = cuda_adapter->launch( grid, block, smem_size, stream, kernel_params, 0 ); } else { return Status::kErrorInternal; } } else { CUTLASS_ASSERT(cuda_adapter == nullptr); device_kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params); } } cudaError_t result = cudaGetLastError(); if (cudaSuccess == result && Status::kSuccess == launch_result) { return Status::kSuccess; } else { CUTLASS_TRACE_HOST(" Kernel launch failed. Reason: " << result); return Status::kErrorInternal; } } // // Non-static launch overloads that first create and set the internal params struct of this kernel handle. // /// Launches the kernel after first constructing Params internal state from supplied arguments. 
Status run( Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr ) { Status status = initialize(args, workspace, stream, cuda_adapter); if (Status::kSuccess == status) { status = run(params_, stream, cuda_adapter); } return status; } /// Launches the kernel after first constructing Params internal state from supplied arguments. Status operator()( Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { return run(args, workspace, stream, cuda_adapter); } /// Overload that allows a user to re-launch the same kernel without updating internal params struct. Status run(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { return run(params_, stream, cuda_adapter); } /// Overload that allows a user to re-launch the same kernel without updating internal params struct. Status operator()(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { return run(params_, stream, cuda_adapter); } }; //////////////////////////////////////////////////////////////////////////////// ////////////////////////////// CUTLASS 2.x API ///////////////////////////////// //////////////////////////////////////////////////////////////////////////////// template <class GemmKernel_> class GemmUniversalAdapter< GemmKernel_, cute::enable_if_t<not gemm::detail::IsCutlass3GemmKernel<GemmKernel_>::value>> { public: using GemmKernel = GemmKernel_; static bool const kInternalTranspose = !cutlass::epilogue::threadblock::detail::is_2x_evt_v<typename GemmKernel::Epilogue> && // 2.x EVT does not require internal transpose cute::is_same<typename GemmKernel::LayoutC, cutlass::layout::RowMajor>::value; using ThreadblockShape = typename GemmKernel::Mma::Shape; using WarpShape = typename GemmKernel::WarpShape; using InstructionShape = typename GemmKernel::InstructionShape; // warp-level, arch-level (instruction), math operator using WarpMmaOperator = typename GemmKernel::Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename WarpMmaOperator::MathOperator; // Operator class and arch tag extract bottom-up // set it for top-level gemm device-level template using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; // Type, layout, and complex transform deliberately exchanged with B using MapArguments = kernel::detail::MapArguments< typename GemmKernel::ElementA, typename GemmKernel::LayoutA, GemmKernel::kTransformA, GemmKernel::kAlignmentA, typename GemmKernel::ElementB, typename GemmKernel::LayoutB, GemmKernel::kTransformB, GemmKernel::kAlignmentB, typename GemmKernel::LayoutC, kInternalTranspose >; using ElementA = typename MapArguments::ElementA; using LayoutA = typename MapArguments::LayoutA; static ComplexTransform const kTransformA = MapArguments::kTransformA; static int const kAlignmentA = MapArguments::kAlignmentA; using ElementB = typename MapArguments::ElementB; using LayoutB = typename MapArguments::LayoutB; static ComplexTransform const kTransformB = MapArguments::kTransformB; static int const kAlignmentB = MapArguments::kAlignmentB; using ElementC = typename GemmKernel::ElementC; using LayoutC = typename MapArguments::LayoutC; static int const kAlignmentC = GemmKernel::kAlignmentC; // C and D same type for 2.x kernel using ElementD = ElementC; using LayoutD = LayoutC; using TensorRefA = TensorRef<ElementA const, LayoutA>; using TensorRefB = 
TensorRef<ElementB const, LayoutB>; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementD, LayoutD>; static int const kStages = GemmKernel::Mma::kStages; using EpilogueOutputOp = typename GemmKernel::EpilogueOutputOp; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ThreadblockSwizzle = typename GemmKernel::ThreadblockSwizzle; using UnderlyingOperator = GemmUniversalBase<GemmKernel>; using Arguments = typename UnderlyingOperator::Arguments; private: UnderlyingOperator underlying_operator_; public: /// Constructs the GEMM. GemmUniversalAdapter() { } /// Helper to construct a transposed equivalent for the underying GEMM operator static Arguments to_underlying_arguments(Arguments const &args) { if (kInternalTranspose) { return args.transposed_problem(); } else { return args; } } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args, CudaHostAdapter *cuda_adapter = nullptr) { return UnderlyingOperator::can_implement(to_underlying_arguments(args), cuda_adapter); } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args, CudaHostAdapter *cuda_adapter = nullptr) { return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args), cuda_adapter); } /// Computes the grid shape static dim3 get_grid_shape(Arguments const &args) { return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args)); } /// Computes the maximum number of active blocks per multiprocessor static int maximum_active_blocks(int smem_capacity = -1) { return UnderlyingOperator::maximum_active_blocks(smem_capacity); } /// Initializes GEMM state from arguments. Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr ) { return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream, cuda_adapter); } /// Lightweight update given a subset of arguments. Status update(Arguments const &args) { return underlying_operator_.update(to_underlying_arguments(args)); } /// Runs the kernel using initialized state. Status run( cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { return underlying_operator_.run(stream, cuda_adapter); } /// Runs the kernel using initialized state. Status operator()( cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { Status status = initialize(args, workspace, stream, cuda_adapter); if (status == Status::kSuccess) { status = run(stream, cuda_adapter); } return status; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::device ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/device/gemm_universal_adapter.h/0
{ "file_path": "cutlass/include/cutlass/gemm/device/gemm_universal_adapter.h", "repo_id": "cutlass", "token_count": 8505 }
36
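The adapter above exposes a small host-side lifecycle: can_implement, get_workspace_size, initialize, then run. The sketch below exercises that flow for the 3.x specialization; it is a minimal illustration only, assuming a previously composed CUTLASS 3.x kernel type (the template parameter GemmKernel) and an already-populated Arguments object, neither of which is defined in this file.

#include <cuda_runtime.h>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"

// GemmKernel is an assumed, user-composed cutlass::gemm::kernel::GemmUniversal type.
template <class GemmKernel>
cutlass::Status run_universal_gemm(
    typename cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>::Arguments const &args,
    cudaStream_t stream = nullptr) {

  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  Gemm gemm_op;

  // Reject unsupported problem sizes / alignments before allocating anything.
  if (Gemm::can_implement(args) != cutlass::Status::kSuccess) {
    return cutlass::Status::kInvalid;
  }

  // Workspace covers split-K counters and any kernel-managed scratch.
  size_t workspace_bytes = Gemm::get_workspace_size(args);
  void *workspace = nullptr;
  if (workspace_bytes > 0 && cudaMalloc(&workspace, workspace_bytes) != cudaSuccess) {
    return cutlass::Status::kErrorInternal;
  }

  // initialize() maps Arguments to Params via GemmKernel::to_underlying_arguments() and,
  // when no CudaHostAdapter is used, raises the dynamic shared memory limit if needed.
  cutlass::Status status = gemm_op.initialize(args, workspace, stream);
  if (status == cutlass::Status::kSuccess) {
    // run() takes the cluster launch path on SM90+ and the classic <<<...>>> launch otherwise.
    status = gemm_op.run(stream);
  }

  cudaFree(workspace);  // no-op if workspace is nullptr
  return status;
}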
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level Blocked-Ell sparse gemm operators. This operator combines threadblock-scoped ELL MMA with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/wmma.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/gemm.h" #include "cutlass/gemm/kernel/gemm_pipelined.h" #include "cutlass/gemm/threadblock/default_mma_core_sm75.h" #include "cutlass/gemm/threadblock/default_mma_core_sm70.h" #include "cutlass/gemm/threadblock/default_mma_core_sm80.h" #include "cutlass/gemm/threadblock/default_mma.h" #include "cutlass/gemm/threadblock/default_mma_core_simt.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" #endif //CUTLASS_ARCH_WMMA_ENABLED #include "cutlass/gemm/kernel/ell_gemm.h" #include "cutlass/gemm/threadblock/default_ell_mma.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { //////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse> struct DefaultEllGemm; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Ampere Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: 
GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse > struct DefaultEllGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, IsASparse> { /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount>::Epilogue; /// Define the kernel-level GEMM operator. using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Turing Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// If true, kernel is configured to support serial reduction in the epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse > struct DefaultEllGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse > { /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape, InstructionShape, 2, Operator >::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the 
epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; /// Define the kernel-level GEMM operator. using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout template < /// Element type for A matrix operand typename ElementA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// Number of Interleaved k int InterleavedK, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse> struct DefaultEllGemm< ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA, ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC, layout::ColumnMajorInterleaved<InterleavedK>, int32_t, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, IsASparse> { using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>; using LayoutB = layout::RowMajorInterleaved<InterleavedK>; using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>; using ElementAccumulator = int32_t; /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator, true>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock:: DefaultInterleavedEpilogueTensorOp< ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, 64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue; /// Define the kernel-level GEMM operator. 
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Turing Integer Matrix Multiply Interleaved layout template < /// Element type for A matrix operand typename ElementA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of Interleaved k int InterleavedK, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse> struct DefaultEllGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA, ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC, layout::ColumnMajorInterleaved<InterleavedK>, int32_t, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse> { using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>; using LayoutB = layout::RowMajorInterleaved<InterleavedK>; using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>; using ElementAccumulator = int32_t; /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape, InstructionShape, 2, Operator, true>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock:: DefaultInterleavedEpilogueTensorOp< ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, 64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue; /// Define the kernel-level GEMM operator. 
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Volta architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// If true, kernel is configured to support serial reduction in the epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse > struct DefaultEllGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, arch::Sm70, ThreadblockShape, WarpShape, GemmShape<8, 8, 4>, EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse > { /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm70, ThreadblockShape, WarpShape, GemmShape<8, 8, 4>, 2, Operator >::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; /// Define the kernel-level GEMM operator. 
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for SIMT template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Element type for internal accumulation typename ElementAccumulator, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// If true, kernel is configured to support serial reduction in the epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse > struct DefaultEllGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse> { /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, arch::Sm50, ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, 2, Operator>::ThreadblockMma; static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount; static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars"); /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, typename Mma::Operator, EpilogueOutputOp, kEpilogueElementsPerAccess >::Epilogue; /// Define the kernel-level GEMM operator. 
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Ampere template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages int Stages, /// If true, kernel is configured to support serial reduction in the epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse > struct DefaultEllGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassSimt, arch::Sm80, ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, IsASparse> { /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, arch::Sm80, ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, Stages, Operator>::ThreadblockMma; static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount; static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars"); /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, typename Mma::Operator, EpilogueOutputOp, kEpilogueElementsPerAccess >::Epilogue; /// Define the kernel-level GEMM operator. 
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial,IsASparse>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for SIMT DP4A template < /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Layout type for C matrix operand typename LayoutC, /// Element type for C and D matrix operands typename ElementC, /// Tag indicating architecture to tune for typename ArchTag, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse > struct DefaultEllGemm<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>, EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse> { using InstructionShape = GemmShape<1, 1, 4>; using ElementA = int8_t; using ElementB = int8_t; using OperatorClass = arch::OpClassSimt; /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, arch::OpClassSimt, arch::Sm50, ThreadblockShape, WarpShape, InstructionShape, 2, Operator >::ThreadblockMma; static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount; static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars"); /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, typename Mma::Operator, EpilogueOutputOp, kEpilogueElementsPerAccess >::Epilogue; /// Define the kernel-level GEMM operator. 
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>; }; #if defined(CUTLASS_ARCH_WMMA_ENABLED) //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Wmma Gemm Kernel template < ///< Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse > struct DefaultEllGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, IsASparse> { /// Define the threadblock-scoped matrix multiply-accumulate using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWmmaTensorOp< ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; /// Define the kernel-level GEMM operator. using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>; }; //////////////////////////////////////////////////////////////////////////////// #endif //CUTLASS_ARCH_WMMA_ENABLED //////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/kernel/default_ell_gemm.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/default_ell_gemm.h", "repo_id": "cutlass", "token_count": 10017 }
37
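DefaultEllGemm is consumed by naming one of its partial specializations and extracting the nested GemmKernel type. The instantiation below targets the Sm80 tensor-op specialization above; the element types, layouts, tile shapes, alignments, stage count, and epilogue are illustrative choices for a half-precision Blocked-ELL-by-dense problem, not values required by this header.

#include "cutlass/arch/arch.h"
#include "cutlass/arch/mma.h"
#include "cutlass/half.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/default_ell_gemm.h"

using ElementA = cutlass::half_t;             // Blocked-ELL (sparse) operand
using ElementB = cutlass::half_t;             // dense operand
using ElementC = cutlass::half_t;
using ElementAccumulator = float;

// 128-bit stores in the epilogue: 8 half_t elements per access.
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
    ElementC, 128 / cutlass::sizeof_bits<ElementC>::value,
    ElementAccumulator, ElementAccumulator>;

using EllGemmKernel = typename cutlass::gemm::kernel::DefaultEllGemm<
    ElementA, cutlass::layout::RowMajor,    8,   // A: element, layout, alignment (128b / 16b)
    ElementB, cutlass::layout::ColumnMajor, 8,   // B: element, layout, alignment
    ElementC, cutlass::layout::RowMajor,         // C/D: this specialization requires RowMajor C
    ElementAccumulator,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,      // threadblock tile
    cutlass::gemm::GemmShape<64, 64, 32>,        // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,         // Sm80 f16 tensor-core instruction shape
    EpilogueOp,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                           // mainloop stages
    false,                                       // SplitKSerial
    cutlass::arch::OpMultiplyAdd,                // math operator tag
    true                                         // IsASparse: A carries the ELL blocks
>::GemmKernel;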
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a Block-Ell sparse gemm kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/semaphore.h" #include "cutlass/arch/arch.h" #include "cutlass/transform/threadblock/ell_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function bool SplitKSerial, ///! If true, code supporting split-K via serial reduction is enabled. bool IsASparse ///! 
If true, A is sparse matrix > struct EllGemm { using Mma = Mma_; using Epilogue = Epilogue_; using OutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static bool const kSplitKSerial = SplitKSerial; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int swizzle_log_tile{0}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorA::TensorRef ref_A{}; typename Mma::IteratorB::Params params_B{}; typename Mma::IteratorB::TensorRef ref_B{}; typename Epilogue::OutputTileIterator::Params params_C{}; typename Epilogue::OutputTileIterator::TensorRef ref_C{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename Epilogue::OutputTileIterator::TensorRef ref_D{}; typename OutputOp::Params output_op{}; int *semaphore = nullptr; int gemm_k_iterations{0}; int gemm_k_size{0}; const int* ell_idx = nullptr; int ell_ncol{0}; int ell_blocksize{0}; int ell_base_idx{0}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmCoord const & grid_tiled_shape, typename Mma::IteratorA::TensorRef ref_A, typename Mma::IteratorB::TensorRef ref_B, typename Epilogue::OutputTileIterator::TensorRef ref_C, typename Epilogue::OutputTileIterator::TensorRef ref_D, const int* ell_idx, int ell_ncol, int ell_blocksize, int ell_base_idx, typename OutputOp::Params output_op = typename OutputOp::Params(), int *workspace = nullptr ): problem_size(problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(ref_A.layout()), ref_A(ref_A), params_B(ref_B.layout()), ref_B(ref_B), params_C(ref_C.layout()), ref_C(ref_C), params_D(ref_D.layout()), ref_D(ref_D), output_op(output_op), ell_idx(ell_idx), ell_ncol(ell_ncol), ell_blocksize(ell_blocksize), ell_base_idx(ell_base_idx) { int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); gemm_k_size = gemm_k_iterations * Mma::Shape::kK; semaphore = workspace; } }; /// Shared memory storage structure struct SharedStorage { union{ typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; typename cutlass::transform::threadblock::ell::SharedStorage ell; }; // // Methods // EllGemm() = default; /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size, typename Mma::IteratorA::TensorRef ref_A, typename Mma::IteratorB::TensorRef ref_B, typename Epilogue::OutputTileIterator::TensorRef ref_C, typename Epilogue::OutputTileIterator::TensorRef ref_D) { static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout, layout::ColumnMajorInterleaved<32>>::value) ? 32 : (platform::is_same<typename Mma::IteratorA::Layout, layout::ColumnMajorInterleaved<64>>::value) ? 64 : Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout, layout::RowMajorInterleaved<32>>::value) ? 32 : (platform::is_same<typename Mma::IteratorB::Layout, layout::RowMajorInterleaved<64>>::value) ? 
64 : Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; if (!TensorRef_aligned(ref_A, kAlignmentA)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_B, kAlignmentB)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_C, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_D, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kM - 1 ) / Mma::Shape::kM; int ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block; int tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block; // Compute position within threadblock int thread_idx = threadIdx.x; // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; typename Mma::FragmentC accumulators; accumulators.clear(); // skip computation if matrix is 0 if (params.ell_ncol > 0) { // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ ell_block_offset_m * params.ell_blocksize + tile_offset_m * Mma::Shape::kM, threadblock_tile_offset.k() * params.gemm_k_size }; cutlass::MatrixCoord tb_offset_B{ threadblock_tile_offset.k() * params.gemm_k_size, threadblock_tile_offset.n() * Mma::Shape::kN }; int ell_idx_start = (threadblock_tile_offset.m() / tile_in_ell_block) * (params.ell_ncol / params.ell_blocksize); const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]); // Problem size is a function of threadblock index in the K dimension int problem_size_k = min( params.problem_size.k(), (threadblock_tile_offset.k() + 1) * params.gemm_k_size); problem_size_k = min(problem_size_k, params.ell_ncol); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, params.ref_A.data(), {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, params.ref_B.data(), {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Define coef for ELL index depending on LayoutB int ell_stride = iterator_B.get_stride(); typename cutlass::transform::threadblock::ell::Iterator ell_iterator( shared_storage.ell, ell_idx_ptr, params.ell_blocksize, params.ell_base_idx, Mma::Shape::kK, problem_size_k, ell_stride, thread_idx ); // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); if (!kSplitKSerial || gemm_k_iterations > 0) { 
// check if index computations can be skipped static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; constexpr bool is_double = (sizeof(Mma::IteratorA::Element) == 8); constexpr bool is_multiple_alignment = (kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1); const bool is_specialized_blocksize = ((params.ell_blocksize) & (params.ell_blocksize-1)) == 0 && params.ell_blocksize >= Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add if ((is_double || is_multiple_alignment) && is_specialized_blocksize) { mma.operator()<true, true>( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator); } else { mma.operator()<true, false>( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator); } } } // if (params.ell_ncols > 0) // // Epilogue // OutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block; tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block; //assume identity swizzle MatrixCoord threadblock_offset( ell_block_offset_m * params.ell_blocksize + tile_offset_m * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); //avoid out of bounds MatrixCoord threadblock_extent( min(params.problem_size.m(), ell_block_offset_m * params.ell_blocksize + min((tile_offset_m + 1) * Mma::Shape::kM, params.ell_blocksize)), min(params.problem_size.n(), (threadblock_tile_offset.n()+1) * Mma::Shape::kN) ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); // If performing a reduction via split-K, fetch the initial synchronization if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, params.ref_C.data(), threadblock_extent, thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, params.ref_D.data(), threadblock_extent, thread_idx, threadblock_offset ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); } // Execute the epilogue operator to update the destination tensor. epilogue(output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. 
lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; // B is Sparse template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled. > struct EllGemm<Mma_, Epilogue_, ThreadblockSwizzle_, SplitKSerial, false> { using Mma = Mma_; using Epilogue = Epilogue_; using OutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static bool const kSplitKSerial = SplitKSerial; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int swizzle_log_tile{0}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorA::TensorRef ref_A{}; typename Mma::IteratorB::Params params_B{}; typename Mma::IteratorB::TensorRef ref_B{}; typename Epilogue::OutputTileIterator::Params params_C{}; typename Epilogue::OutputTileIterator::TensorRef ref_C{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename Epilogue::OutputTileIterator::TensorRef ref_D{}; typename OutputOp::Params output_op{}; int *semaphore = nullptr; int gemm_k_iterations{0}; int gemm_k_size{0}; const int* ell_idx = nullptr; int ell_ncol{0}; int ell_blocksize{0}; int ell_base_idx{0}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmCoord const & grid_tiled_shape, typename Mma::IteratorA::TensorRef ref_A, typename Mma::IteratorB::TensorRef ref_B, typename Epilogue::OutputTileIterator::TensorRef ref_C, typename Epilogue::OutputTileIterator::TensorRef ref_D, const int* ell_idx, int ell_ncol, int ell_blocksize, int ell_base_idx, typename OutputOp::Params output_op = typename OutputOp::Params(), int *workspace = nullptr ): problem_size(problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(ref_A.layout()), ref_A(ref_A), params_B(ref_B.layout()), ref_B(ref_B), params_C(ref_C.layout()), ref_C(ref_C), params_D(ref_D.layout()), ref_D(ref_D), output_op(output_op), ell_idx(ell_idx), ell_ncol(ell_ncol), ell_blocksize(ell_blocksize), ell_base_idx(ell_base_idx) { int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); gemm_k_size = gemm_k_iterations * Mma::Shape::kK; semaphore = workspace; } }; /// Shared memory storage structure struct SharedStorage { union{ typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; typename cutlass::transform::threadblock::ell::SharedStorage ell; }; // // Methods // CUTLASS_HOST_DEVICE EllGemm() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size, typename Mma::IteratorA::TensorRef ref_A, typename Mma::IteratorB::TensorRef ref_B, typename Epilogue::OutputTileIterator::TensorRef ref_C, typename Epilogue::OutputTileIterator::TensorRef ref_D) { static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout, layout::ColumnMajorInterleaved<32>>::value) ? 
32 : (platform::is_same<typename Mma::IteratorA::Layout, layout::ColumnMajorInterleaved<64>>::value) ? 64 : Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout, layout::RowMajorInterleaved<32>>::value) ? 32 : (platform::is_same<typename Mma::IteratorB::Layout, layout::RowMajorInterleaved<64>>::value) ? 64 : Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; if (!TensorRef_aligned(ref_A, kAlignmentA)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_B, kAlignmentB)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_C, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_D, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kN - 1 ) / Mma::Shape::kN; int ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block; int tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block; // Compute position within threadblock int thread_idx = threadIdx.x; // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. 
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; typename Mma::FragmentC accumulators; accumulators.clear(); // skip computation if matrix is 0 if (params.ell_ncol > 0) { // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.k() * params.gemm_k_size, }; cutlass::MatrixCoord tb_offset_B{ threadblock_tile_offset.k() * params.gemm_k_size, ell_block_offset_n * params.ell_blocksize + tile_offset_n * Mma::Shape::kN, }; int ell_idx_start = (threadblock_tile_offset.n() / tile_in_ell_block) * (params.ell_ncol / params.ell_blocksize); const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]); // Problem size is a function of threadblock index in the K dimension int problem_size_k = min( params.problem_size.k(), (threadblock_tile_offset.k() + 1) * params.gemm_k_size); problem_size_k = min(problem_size_k, params.ell_ncol); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, params.ref_A.data(), {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, params.ref_B.data(), {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Define coef for ELL index depending on LayoutA int ell_stride = iterator_A.get_stride(); typename cutlass::transform::threadblock::ell::Iterator ell_iterator( shared_storage.ell, ell_idx_ptr, params.ell_blocksize, params.ell_base_idx, Mma::Shape::kK, problem_size_k, ell_stride, thread_idx ); // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); if (!kSplitKSerial || gemm_k_iterations > 0) { // check if index computations can be skipped static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; constexpr bool is_double = (sizeof(Mma::IteratorA::Element) == 8); constexpr bool is_multiple_alignment = (kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1); const bool is_specialized_blocksize = ((params.ell_blocksize) & (params.ell_blocksize-1)) == 0 && params.ell_blocksize >= Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add if ((is_double || is_multiple_alignment) && is_specialized_blocksize) { mma.operator()<false, true>( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator); } else { mma.operator()<false, false>( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator); } } } // if (params.ell_ncols > 0) // // Epilogue // OutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block; tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block; //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, ell_block_offset_n * params.ell_blocksize + tile_offset_n * Mma::Shape::kN ); //avoid out of bounds MatrixCoord threadblock_extent( min(params.problem_size.m(), (threadblock_tile_offset.m()+1) * Mma::Shape::kM), 
min(params.problem_size.n(), ell_block_offset_n * params.ell_blocksize + min((tile_offset_n + 1) * Mma::Shape::kN, params.ell_blocksize)) ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); // If performing a reduction via split-K, fetch the initial synchronization if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, params.ref_C.data(), threadblock_extent, thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, params.ref_D.data(), threadblock_extent, thread_idx, threadblock_offset ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); } // Execute the epilogue operator to update the destination tensor. epilogue(output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/kernel/ell_gemm.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/ell_gemm.h", "repo_id": "cutlass", "token_count": 12123 }
38
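The Blocked-ELL kernel above turns each threadblock's N coordinate into an ELL block index and an offset inside that block before constructing the B operand iterator. Below is a minimal host-side sketch of that arithmetic; kTileN, kTileK, ell_blocksize, and tile_n are assumed illustrative values standing in for Mma::Shape and the kernel's params, not values taken from the header.

#include <cstdio>

// Host-side model of the Blocked-ELL index math in ell_gemm.h (illustration only).
int main() {
  int const kTileN = 128;      // stands in for Mma::Shape::kN (assumed)
  int const kTileK = 32;       // stands in for Mma::Shape::kK (assumed)
  int ell_blocksize = 512;     // stands in for params.ell_blocksize (assumed)
  int tile_n = 11;             // stands in for threadblock_tile_offset.n()

  // Number of N tiles that cover one ELL block, rounded up.
  int tile_in_ell_block = (ell_blocksize + kTileN - 1) / kTileN;
  // ELL block this tile belongs to, and the tile index inside that block.
  int ell_block_offset_n = tile_n / tile_in_ell_block;
  int tile_offset_n      = tile_n % tile_in_ell_block;
  // Starting column for operand B, matching the kernel's tb_offset_B.
  int b_column = ell_block_offset_n * ell_blocksize + tile_offset_n * kTileN;

  // The specialized mainloop path requires a power-of-two ELL block size
  // no smaller than the K tile, mirroring is_specialized_blocksize.
  bool fast_path = ((ell_blocksize & (ell_blocksize - 1)) == 0) &&
                   ell_blocksize >= kTileK;

  std::printf("ell block %d, tile in block %d, B column %d, fast path %d\n",
              ell_block_offset_n, tile_offset_n, b_column, int(fast_path));
  return 0;
}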
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/blas3.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function FillMode FillModeC_ ///! 
Fill Mode for C (kLower or kUpper) > struct RankKUniversal { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; static FillMode const kFillModeC = FillModeC_; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Split-K preserves splits that are 128b aligned static int const kSplitKAlignment = 128 / sizeof_bits<ElementA>::value; // // Structures // /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode{GemmUniversalMode::kGemm}; GemmCoord problem_size{}; int batch_count{1}; typename EpilogueOutputOp::Params epilogue{}; void const * ptr_A{nullptr}; void const * ptr_C{nullptr}; void * ptr_D{nullptr}; int64_t batch_stride_A{0}; int64_t batch_stride_C{0}; int64_t batch_stride_D{0}; typename LayoutA::Stride::Index lda{}; typename LayoutB::Stride::Index ldb{}; typename LayoutC::Stride::Index ldc{}; typename LayoutC::Stride::Index ldd{}; bool allow_early_exit{false}; // // Methods // Arguments() = default; /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_C, void * ptr_D, int64_t batch_stride_A, int64_t batch_stride_C, int64_t batch_stride_D, typename LayoutA::Stride::Index lda, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, bool allow_early_exit = false ): mode(mode), problem_size(problem_size), batch_count(batch_count), epilogue(epilogue), ptr_A(ptr_A), ptr_C(ptr_C), ptr_D(ptr_D), batch_stride_A(batch_stride_A), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), lda(lda), ldb(0), ldc(ldc), ldd(ldd), allow_early_exit(allow_early_exit) { } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int swizzle_log_tile{0}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorB::Params params_B{}; typename Epilogue::OutputTileIterator::Params params_C{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename EpilogueOutputOp::Params output_op{}; GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; int batch_count{0}; int gemm_k_size{0}; void * ptr_A{nullptr}; void * ptr_B{nullptr}; void * ptr_C{nullptr}; 
void * ptr_D{nullptr}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; int64_t batch_stride_C{0}; int64_t batch_stride_D{0}; int *semaphore{nullptr}; bool allow_early_exit{false}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params( Arguments const &args, cutlass::gemm::GemmCoord const & grid_tiled_shape, int gemm_k_size, void *workspace = nullptr ): problem_size(args.problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(args.lda), params_B(args.lda), params_C(args.ldc), params_D(args.ldd), output_op(args.epilogue), mode(args.mode), batch_count(args.batch_count), gemm_k_size(gemm_k_size), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_A)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(const_cast<void *>(args.ptr_D)), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_A), batch_stride_C(args.batch_stride_C), batch_stride_D(args.batch_stride_D), semaphore(static_cast<int *>(workspace)), allow_early_exit(args.allow_early_exit) { } CUTLASS_HOST_DEVICE void update( Arguments const &args, void *workspace = nullptr) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_A); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; output_op = args.epilogue; semaphore = static_cast<int *>(workspace); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Methods // CUTLASS_DEVICE RankKUniversal() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit following LAPACK's definition if (params.allow_early_exit && (params.output_op.alpha == ElementC(0)) && (params.output_op.beta == ElementC(1))) { return; } // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } // Early exit if Fill Mode is Lower and // if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal) if (kFillModeC == cutlass::FillMode::kLower && (threadblock_tile_offset.m() + 1) * Mma::Shape::kM <= threadblock_tile_offset.n() * Mma::Shape::kN) { return; } // Early exit if Fill Mode is Upper and // if the entire tile is below the main diagonal (top-right corner is at or below the diagonal) if (kFillModeC == cutlass::FillMode::kUpper && threadblock_tile_offset.m() * Mma::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) { return; } bool 
tile_on_diagonal = false; // Mark tiles that are being crossed by the main diagonal // (top-right and bottom-left corners are on either side of the diagonal) if ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM > threadblock_tile_offset.n() * Mma::Shape::kN && threadblock_tile_offset.m() * Mma::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) { tile_on_diagonal = true; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. // if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } else if (params.mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); // // Fetch pointers based on mode. // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; } // If CTA not on diagonal, FillMode doesn't apply. FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone; // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset, kFillModeCTA ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset, kFillModeCTA ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. epilogue( output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/rank_k_universal.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/rank_k_universal.h", "repo_id": "cutlass", "token_count": 6750 }
39
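rank_k_universal.h above gates each CTA on where its output tile lies relative to the main diagonal of the C/D operand: tiles entirely outside the stored triangle exit early, tiles crossed by the diagonal keep the fill mode for epilogue masking, and the rest are plain interior tiles. The sketch below reproduces those predicates on the host so the three cases are easy to see; kTileM and kTileN are illustrative stand-ins for Mma::Shape::kM/kN rather than values prescribed by the header.

#include <cstdio>

// Host-side model of the fill-mode tile classification in rank_k_universal.h.
enum class Fill { kLower, kUpper };

// Returns 0 if the CTA can exit early, 1 if the tile is crossed by the main
// diagonal (epilogue must mask with the fill mode), 2 for an interior tile.
int classify_tile(Fill fill, int tile_m, int tile_n, int kTileM, int kTileN) {
  int row_begin = tile_m * kTileM, row_end = (tile_m + 1) * kTileM;
  int col_begin = tile_n * kTileN, col_end = (tile_n + 1) * kTileN;

  // Entire tile above the diagonal while storing the lower triangle.
  if (fill == Fill::kLower && row_end <= col_begin) return 0;
  // Entire tile below the diagonal while storing the upper triangle.
  if (fill == Fill::kUpper && row_begin >= col_end) return 0;
  // Tile straddles the diagonal.
  if (row_end > col_begin && row_begin < col_end) return 1;
  return 2;
}

int main() {
  for (int m = 0; m < 3; ++m)
    for (int n = 0; n < 3; ++n)
      std::printf("lower tile (%d,%d): %d\n", m, n,
                  classify_tile(Fill::kLower, m, n, 128, 128));
  return 0;
}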
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/fast_math.h" #include "cutlass/gemm_coord.hpp" #include "cutlass/kernel_hardware_info.hpp" #include "cutlass/gemm/kernel/tile_scheduler_params.h" #include "cute/layout.hpp" #include "cute/tensor.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/pipeline/pipeline.hpp" namespace cutlass::gemm::kernel::detail { /////////////////////////////////////////////////////////////////////////////// // Users are not supposed to use this class directly. // This is a CRTP base class for the actual tile schedulers. 
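// A derived scheduler is expected to supply a static get_work_idx_m_and_n(...)
// hook that decodes the per-batch linear block index into (M, N) tile indices;
// get_current_work_for_linear_idx below forwards the divmod state, swizzle size,
// and raster order to that hook through the Subclass template parameter.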
template<class Subclass> class StaticPersistentTileScheduler { // // Data members // private: uint64_t current_work_linear_idx_; uint64_t total_grid_size_; public: struct WorkTileInfo { int32_t M_idx = 0; int32_t N_idx = 0; int32_t L_idx = 0; bool is_valid_tile = false; CUTLASS_HOST_DEVICE bool is_valid() const { return is_valid_tile; } CUTLASS_HOST_DEVICE static WorkTileInfo invalid_work_tile() { return {-1, -1, -1, false}; } CUTLASS_HOST_DEVICE bool is_final_split(uint32_t k_tiles_per_output_tile) const { return true; } CUTLASS_HOST_DEVICE int32_t reduction_subtile_idx() const { return -1; } }; using Params = PersistentTileSchedulerSm90Params; using RasterOrder = typename Params::RasterOrder; using RasterOrderOptions = typename Params::RasterOrderOptions; public: struct Arguments { int max_swizzle_size = 1; RasterOrderOptions raster_order = RasterOrderOptions::Heuristic; }; template <class ProblemShapeMNKL, class TileShape, class ClusterShape> static Params to_underlying_arguments( ProblemShapeMNKL problem_shape_mnkl, TileShape tile_shape, ClusterShape cluster_shape, [[maybe_unused]] KernelHardwareInfo const& hw_info, Arguments const& arguments, [[maybe_unused]] void* workspace=nullptr, [[maybe_unused]] const uint32_t epilogue_subtile = 1) { // We only need the tile and cluster shape during scheduler setup, so let FTAD do the magic static_assert(cute::is_static<TileShape>::value); static_assert(cute::is_static<ClusterShape>::value); dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); Params params; params.initialize( problem_blocks, to_gemm_coord(cluster_shape), hw_info, arguments.max_swizzle_size, arguments.raster_order ); return params; } CUTLASS_HOST_DEVICE static bool can_implement(Arguments const& args) { return true; } CUTLASS_HOST_DEVICE StaticPersistentTileScheduler() { } CUTLASS_DEVICE explicit StaticPersistentTileScheduler(Params const& params_) : scheduler_params(params_) { // MSVC requires protecting use of CUDA-specific nonstandard syntax, // like blockIdx and gridDim, with __CUDA_ARCH__. 
#if defined(__CUDA_ARCH__) if (params_.raster_order_ == RasterOrder::AlongN) { current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x); } else { current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y); } total_grid_size_ = uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z); #else CUTLASS_ASSERT(false && "This line should never be reached"); #endif } // Returns the initial work tile info that will be computed over template <class ClusterShape> CUTLASS_DEVICE WorkTileInfo initial_work_tile_info(ClusterShape cluster_shape) { return get_current_work(); } CUTLASS_DEVICE WorkTileInfo get_current_work() const { return get_current_work_for_linear_idx(current_work_linear_idx_); } CUTLASS_DEVICE WorkTileInfo get_current_work_for_linear_idx(uint64_t linear_idx) const { if (linear_idx >= scheduler_params.blocks_per_problem_) { return WorkTileInfo::invalid_work_tile(); } // Map worker's linear index into the CTA tiled problem shape to the corresponding MNL indices uint64_t work_idx_l, remainder; scheduler_params.divmod_batch_(work_idx_l, remainder, linear_idx); uint64_t blk_per_grid_dim = scheduler_params.divmod_cluster_shape_minor_.divide(remainder); auto [work_idx_m, work_idx_n] = Subclass::get_work_idx_m_and_n(blk_per_grid_dim, scheduler_params.divmod_cluster_shape_major_, scheduler_params.divmod_cluster_shape_minor_, scheduler_params.divmod_cluster_blk_major_, scheduler_params.log_swizzle_size_, scheduler_params.raster_order_); return {work_idx_m, work_idx_n, static_cast<int32_t>(work_idx_l), true}; } CUTLASS_DEVICE void advance_to_next_work(uint32_t advance_count = 1) { current_work_linear_idx_ += total_grid_size_ * uint64_t(advance_count); } // Computes the linear index within a batch given M and N tile offsets within the batch. // This essentially inverts the mapping performed in get_work_idx_m_and_n static CUTLASS_DEVICE uint64_t get_linear_idx_from_m_and_n( int32_t tile_m, int32_t tile_n, FastDivmodU64Pow2 const& divmod_cluster_shape_major, FastDivmodU64Pow2 const& divmod_cluster_shape_minor, FastDivmodU64 const& divmod_cluster_blk_major, int32_t log_swizzle_size, RasterOrder raster_order) { auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster(); uint64_t minor_work_idx, major_work_idx, cluster_minor_offset; if (raster_order == RasterOrder::AlongN) { minor_work_idx = static_cast<uint64_t>(tile_m); major_work_idx = static_cast<uint64_t>(tile_n); cluster_minor_offset = cta_m_in_cluster; } else { major_work_idx = static_cast<uint64_t>(tile_m); minor_work_idx = static_cast<uint64_t>(tile_n); cluster_minor_offset = cta_n_in_cluster; } uint64_t cluster_idx_minor, cluster_idx_major, cluster_major_offset; cluster_idx_minor = divmod_cluster_shape_minor.divide(minor_work_idx - cluster_minor_offset); divmod_cluster_shape_major(cluster_idx_major, cluster_major_offset, major_work_idx); uint64_t cluster_idx_minor_div_swizzle = cluster_idx_minor >> log_swizzle_size; uint64_t offset = cluster_idx_minor & ((1 << log_swizzle_size) - 1); uint64_t extra = cluster_idx_minor_div_swizzle * divmod_cluster_blk_major.divisor + cluster_idx_major; uint64_t cluster_id = (extra << log_swizzle_size) | offset; return (cluster_id * divmod_cluster_shape_major.divisor + cluster_major_offset) * divmod_cluster_shape_minor.divisor + cluster_minor_offset; } // Given the inputs, computes the total number of output blocks over which this problem will compute. 
// Note that this is only the logical size of our grid, not the physical grid we will actually launch. template<class ProblemShapeMNKL, class BlockShape, class ClusterShape> CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(ProblemShapeMNKL problem_shape_mnkl, BlockShape cta_shape, ClusterShape cluster_shape) { auto cta_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shape_mnkl), cute::shape<0>(cta_shape))); auto cta_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shape_mnkl), cute::shape<1>(cta_shape))); return Params::get_tiled_cta_shape_mnl( to_gemm_coord(problem_shape_mnkl), to_gemm_coord(cluster_shape), cta_m, cta_n ); } // Kernel helper function to get next work ID template <class WorkIdPipeline, class WorkIdPipelineState> CUTLASS_DEVICE auto fetch_next_work( WorkTileInfo work_tile_info, WorkIdPipeline& work_id_pipeline, WorkIdPipelineState work_id_pipe_consumer_state) { WorkTileInfo new_work_tile_info; advance_to_next_work(); new_work_tile_info = get_current_work(); // Return true to indicate that the WorkID pipeline state should be advanced return cute::make_tuple(new_work_tile_info, true); } CUTLASS_DEVICE static auto work_tile_to_cta_coord(WorkTileInfo work_tile_info) { // Get every cta coord in three dimensions of the cluster auto [cta_m_in_cluster, cta_n_in_cluster, cta_l_in_cluster] = cute::block_id_in_cluster(); return make_coord( work_tile_info.M_idx + static_cast<int32_t>(cta_m_in_cluster), work_tile_info.N_idx + static_cast<int32_t>(cta_n_in_cluster), _, work_tile_info.L_idx + static_cast<int32_t>(cta_l_in_cluster) ); } // Given the inputs, computes the physical grid we should launch. template<class ProblemShapeMNKL, class BlockShape, class ClusterShape> CUTLASS_HOST_DEVICE static dim3 get_grid_shape( ProblemShapeMNKL problem_shape_mnk, BlockShape cta_shape, ClusterShape cluster_shape, KernelHardwareInfo hw_info, Arguments arguments, bool truncate_by_problem_size=true) { auto problem_shape_mnkl = cute::append<4>(problem_shape_mnk, cute::Int<1>{}); dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); return Params::get_grid_shape( problem_blocks, to_gemm_coord(cluster_shape), hw_info, arguments.max_swizzle_size, arguments.raster_order, /* truncate_by_problem_size = */true ); } // Given the inputs, computes the physical grid we should launch. template<class ProblemShapeMNKL, class BlockShape, class ClusterShape> CUTLASS_HOST_DEVICE static dim3 get_grid_shape( Params const& params, ProblemShapeMNKL problem_shape_mnk, BlockShape cta_shape, ClusterShape cluster_shape, KernelHardwareInfo hw_info) { auto problem_shape_mnkl = cute::append<4>(problem_shape_mnk, cute::Int<1>{}); dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); Arguments args{}; if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) { args.max_swizzle_size = 1 << params.log_swizzle_size_; } args.raster_order = params.raster_order_ == RasterOrder::AlongN ? 
RasterOrderOptions::AlongN : RasterOrderOptions::AlongM; return Params::get_grid_shape( problem_blocks, to_gemm_coord(cluster_shape), hw_info, args.max_swizzle_size, args.raster_order, /* truncate_by_problem_size = */true ); } // Convert CTA-level work tile info to cluster-level tile coord CUTLASS_DEVICE auto work_tile_to_cluster_coord_mnkl(WorkTileInfo work_tile_info) const { // TileScheduler works at CTA-level, kernel works at cluster-level int m_coord = idx2crd(work_tile_info.M_idx / scheduler_params.cluster_shape_m_, scheduler_params.problem_tiles_m_); int n_coord = idx2crd(work_tile_info.N_idx / scheduler_params.cluster_shape_n_, scheduler_params.problem_tiles_n_); int l_coord = idx2crd(work_tile_info.L_idx, scheduler_params.problem_tiles_l_); return make_coord(m_coord, n_coord, _, l_coord); } // Returns whether the block assigned this work should compute the epilogue for the corresponding // output tile. For the basic tile scheduler, this is always true. CUTLASS_HOST_DEVICE static bool compute_epilogue(WorkTileInfo const&, Params const&) { return true; } CUTLASS_HOST_DEVICE static bool compute_epilogue(WorkTileInfo const&) { return true; } // Performs the reduction across splits for a given output tile. Since this scheduler does // not split output tiles, no reduction is needed. template <class FrgTensorC> CUTLASS_DEVICE static void fixup(Params const&, WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) {} // Performs the reduction across splits for a given output tile. No fixup is required for // work units returned by this scheduler. template <class FrgTensorC> CUTLASS_DEVICE void fixup(WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) const { } // Returns whether the current WorkTileInfo passed in should continue to be used. Since // this scheduler only schedules work in units of single, full output tiles, the WorkTileInfo // passed in should not be used after having been processed. CUTLASS_DEVICE static bool continue_current_work(WorkTileInfo&) { return false; } template <class ProblemShape, class TileShape> CUTLASS_HOST_DEVICE static int get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape problem_shape, TileShape tile_shape) { // All work units returned by this scheduler cover the entire K iteration // space of the output tile assigned to the work unit. 
return cute::size(cute::ceil_div(cute::get<2>(problem_shape), cute::get<2>(tile_shape))); } CUTLASS_HOST_DEVICE static uint32_t get_work_k_tile_start(WorkTileInfo const&) { // All work units returned by this scheduler start from K tile 0 return 0u; } CUTLASS_DEVICE static bool need_separate_reduction(Params const& params) { return false; } CUTLASS_DEVICE bool is_work_tile_for_reduction(WorkTileInfo const& work_tile_info, Params const& params) { return false; } template <class FrgTensorC> CUTLASS_DEVICE void separate_reduction( Params const& params, WorkTileInfo const& work_tile_info, FrgTensorC& accumulators, uint32_t num_barriers, uint32_t barrier_idx) { } // Shares the accumulator set with peers in the global workspace template <class FrgTensorC> CUTLASS_DEVICE static void share( Params const& params, WorkTileInfo const& work_tile_info, FrgTensorC& accumulators, uint32_t num_barriers, uint32_t barrier_idx) { } CUTLASS_DEVICE static bool valid_warpgroup_in_work_tile(WorkTileInfo const& work_tile_info) { return true; } CUTLASS_DEVICE static bool requires_separate_reduction(Params const& params) { return false; } public: // Sink scheduler params as a member Params scheduler_params; }; } // namespace cutlass::gemm::kernel::detail
cutlass/include/cutlass/gemm/kernel/static_tile_scheduler.hpp/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/static_tile_scheduler.hpp", "repo_id": "cutlass", "token_count": 6191 }
40
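The persistent scheduler above decomposes a worker's linear index with FastDivmod objects and a cluster-aware swizzle. As a simplified illustration only, the sketch below models the degenerate case of a 1x1 cluster with no swizzle, where the decomposition collapses to plain division; the grid size, tile counts, and starting index are assumed example values.

#include <cstdint>
#include <cstdio>

// Simplified host-side model of StaticPersistentTileScheduler's index mapping,
// assuming a 1x1 cluster and log_swizzle_size == 0. With RasterOrder::AlongN,
// M is the minor (fastest-varying) dimension, as in the device code.
struct TileCoord { int m, n, l; };

TileCoord tile_from_linear_idx(uint64_t linear_idx,
                               uint64_t tiles_m, uint64_t tiles_n) {
  // Stands in for the divmod_batch_ divisor under a 1x1 cluster.
  uint64_t tiles_per_batch = tiles_m * tiles_n;
  uint64_t l = linear_idx / tiles_per_batch;
  uint64_t r = linear_idx % tiles_per_batch;
  return { int(r % tiles_m), int(r / tiles_m), int(l) };
}

int main() {
  // A persistent CTA advances by the total grid size each iteration,
  // mirroring advance_to_next_work().
  uint64_t grid_size = 4, tiles_m = 3, tiles_n = 2, batches = 2;
  for (uint64_t idx = 1; idx < tiles_m * tiles_n * batches; idx += grid_size) {
    TileCoord c = tile_from_linear_idx(idx, tiles_m, tiles_n);
    std::printf("linear %llu -> (M=%d, N=%d, L=%d)\n",
                (unsigned long long)idx, c.m, c.n, c.l);
  }
  return 0;
}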
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting TensorOp instructions. The SM80 multistage kernel expects the number of stages to be greater than or equal to 3 in order to use asynchronous copy.
*/ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" #include "cutlass/gemm/warp/mma_simt_policy.h" #include "cutlass/gemm/warp/mma_simt.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" #include "cutlass/gemm/threadblock/default_mma_core.h" #include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h" #include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" #include "cutlass/gemm/threadblock/mma_multistage.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::ColumnMajor, double, layout::ColumnMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::ColumnMajor; using ElementB = double; using LayoutB = layout::ColumnMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; // // Shared memory layouts // using SmemLayoutA = 
layout::ColumnMajorTensorOpMultiplicandCongruous64b; using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; // // Iterators to write to shared memory // /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for double-precision /// /// A: column-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::ColumnMajor, double, layout::RowMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::ColumnMajor; using ElementB = double; using LayoutB = layout::RowMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = 
WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b; // Shared memory layout using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// A: row-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::RowMajor, double, layout::ColumnMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::RowMajor; using ElementB = double; using LayoutB = layout::ColumnMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM 
size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// /// Partial specialization for double-precision /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::RowMajor, double, layout::RowMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::RowMajor; using ElementB = double; using LayoutB = layout::RowMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements 
static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::AffineRank2ColumnMajor, double, layout::AffineRank2ColumnMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = double; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = 
cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, arch::OpClassTensorOp, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; /// Partial specialization for double-precision /// /// A: column-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::AffineRank2ColumnMajor, double, layout::AffineRank2RowMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = double; using LayoutB = layout::AffineRank2RowMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementC, LayoutC, arch::OpClassTensorOp, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; 
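// Like the specializations above, the remaining affine rank-2 specializations below
// only translate their layouts to the canonical ColumnMajor/RowMajor forms: each one
// names the corresponding canonical DefaultMmaCore as Base and reuses its shared
// memory layouts, thread maps, iterators, and MmaPolicy. A possible instantiation,
// with illustrative (not prescribed) tile shapes and stage count:
//
//   using Core = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<64, 64, 16>,   // threadblock tile (assumed)
//       cutlass::gemm::GemmShape<32, 32, 16>,   // warp tile (assumed)
//       cutlass::gemm::GemmShape<8, 8, 4>,      // f64 tensor op shape (assumed)
//       double, cutlass::layout::AffineRank2RowMajor,
//       double, cutlass::layout::AffineRank2ColumnMajor,
//       double, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp, 3, cutlass::arch::OpMultiplyAdd>;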
//////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// A: row-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::AffineRank2RowMajor, double, layout::AffineRank2ColumnMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::AffineRank2RowMajor; using ElementB = double; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, arch::OpClassTensorOp, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// /// /// Partial specialization for double-precision /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::AffineRank2RowMajor, double, layout::AffineRank2RowMajor, double, LayoutC_, arch::OpClassTensorOp, 
Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::AffineRank2RowMajor; using ElementB = double; using LayoutB = layout::AffineRank2RowMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementC, LayoutC, arch::OpClassTensorOp, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for float-precision /// /// ElementA: complex<float> /// ElementB: complex<float> /// ElementC: complex<float> /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Layout for A operand typename LayoutA_, /// Layout for B operand typename LayoutB_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// per-element transformation for elements of A ComplexTransform TransformA_, /// per-element transformation for elements of B ComplexTransform TransformB_ > struct DefaultMmaCore< Shape_, WarpShape_, GemmShape<16, 8, 8>, complex<float>, LayoutA_, complex<float>, LayoutB_, complex<float>, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB, TransformA_, TransformB_, true> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<16, 8, 8>; using ElementA = complex<float>; using LayoutA = LayoutA_; using ElementB = complex<float>; using LayoutB = LayoutB_; using ElementC = complex<float>; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; static const ComplexTransform TransformA = TransformA_; static const ComplexTransform TransformB = TransformB_; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // 
Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; static_assert( platform::is_same<Operator, arch::OpMultiplyAddComplex>::value || platform::is_same<Operator, arch::OpMultiplyAddGaussianComplex>::value || platform::is_same<Operator, arch::OpMultiplyAddComplexFastF32>::value, "The operator tag must indicate complex multiplication."); // // Underlying template // using MmaComplexCore = DefaultMultistageMmaComplexCore< Shape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, arch::OpClassTensorOp, kStages, TransformA, TransformB, Operator, kCacheOpA, kCacheOpB >; // // Shared memory layouts // using SmemLayoutA = typename MmaComplexCore::SmemLayoutA; // Shared memory layout using SmemLayoutB = typename MmaComplexCore::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename MmaComplexCore::SmemIteratorA; /// ThreadMap of iterator B using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename MmaComplexCore::SmemIteratorB; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename MmaComplexCore::MmaTensorOp; /// Policy used to define MmaPipelined using MmaPolicy = typename MmaComplexCore::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// ElementA: complex<double> /// ElementB: complex<double> /// ElementC: complex<double> /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout for A operand typename LayoutA_, /// Layout for B operand typename LayoutB_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// per-element transformation for elements of A ComplexTransform TransformA_, /// per-element transformation for elements of B ComplexTransform TransformB_ > struct DefaultMmaCore< Shape_, WarpShape_, InstructionShape_, complex<double>, LayoutA_, complex<double>, LayoutB_, complex<double>, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB, TransformA_, TransformB_, true> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = complex<double>; using LayoutA = LayoutA_; using 
ElementB = complex<double>; using LayoutB = LayoutB_; using ElementC = complex<double>; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; static const ComplexTransform TransformA = TransformA_; static const ComplexTransform TransformB = TransformB_; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; static_assert( platform::is_same<Operator, arch::OpMultiplyAddComplex>::value || platform::is_same<Operator, arch::OpMultiplyAddGaussianComplex>::value, "The operator tag must indicate complex multiplication."); // // Underlying template // using MmaComplexCore = DefaultMultistageMmaComplexCore< Shape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, arch::OpClassTensorOp, kStages, TransformA, TransformB, Operator, kCacheOpA, kCacheOpB >; // // Shared memory layouts // using SmemLayoutA = typename MmaComplexCore::SmemLayoutA; // Shared memory layout using SmemLayoutB = typename MmaComplexCore::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename MmaComplexCore::SmemIteratorA; /// ThreadMap of iterator B using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename MmaComplexCore::SmemIteratorB; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename MmaComplexCore::MmaTensorOp; /// Policy used to define MmaPipelined using MmaPolicy = typename MmaComplexCore::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B 
cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)), Shape::kM); using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementA>::value, Crosswise_A>; // Shared memory layout static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)), Shape::kN); using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementB>::value, Crosswise_B>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, 
ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, Shape::kK>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, Shape::kK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared 
memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8); static int 
const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)), Shape::kM); using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementA>::value, Crosswise_A>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, Shape::kK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC 
= LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, Shape::kK>; // Shared memory layout static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)), Shape::kN); using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementB>::value, Crosswise_B>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major-interleaved /// B: row-major-interleaved /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes /// /// Column/RowMajorInterleved<InterleavedK>(m, n) is mapped to Column/RowMajor(m /// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators /// can be 
reused. The shared store iterator is the same as the crosswise shared /// store iterator. So, the only thing we need to do is to swap the coordinates /// (contiguous <=> strided) used by the global iterator and the shared store /// iterator. template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Number of interleaved K int InterleavedK> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajorInterleaved<InterleavedK>, ElementB_, layout::RowMajorInterleaved<InterleavedK>, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, AccumulatorsInRowMajor, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>; using ElementB = ElementB_; using LayoutB = layout::RowMajorInterleaved<InterleavedK>; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; static int const kInterleavedK = InterleavedK; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = kAccessSizeInBits / sizeof_bits<ElementA>::value; static int const kWarpThreadArrangementContiguous = kInterleavedK / kElementsPerAccess; static int const kWarpThreadArrangementStrided = kWarpSize / kWarpThreadArrangementContiguous; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, kInterleavedK>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, kInterleavedK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM * kInterleavedK, Shape::kK / kInterleavedK>, kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; /// Transpose the ThreadMap of iterator A using 
SmemThreadMapA = transform::TransposePitchLinearThreadMap< IteratorThreadMapA, layout::PitchLinearShape<kWarpThreadArrangementContiguous, kWarpThreadArrangementStrided>>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN * kInterleavedK, Shape::kK / kInterleavedK>, kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; /// Transpose the ThreadMap of iterator A using SmemThreadMapB = transform::TransposePitchLinearThreadMap< IteratorThreadMapB, layout::PitchLinearShape<kWarpThreadArrangementContiguous, kWarpThreadArrangementStrided>>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for SIMT GEMMs using multistage pipeline. /// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp 
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; // Shared memory layout using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator B using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static_assert(!((Shape::kK / 32) % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, Shape::kK / 32>, WarpCount::kK>; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. 
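///
/// As an illustrative sketch only (the alias name, element type, and tile sizes
/// below are example values, not part of the library interface), this
/// specialization can be selected as follows. Because A is already column-major
/// and B row-major, both operands are staged into shared memory without a
/// transposed thread map, and the resulting MmaPolicy applies no shared-memory
/// padding.
///
/// \code
/// using SimtCoreColRow = cutlass::gemm::threadblock::DefaultMmaCore<
///     cutlass::gemm::GemmShape<128, 128, 8>,   // threadblock tile
///     cutlass::gemm::GemmShape<32, 64, 8>,     // warp tile
///     cutlass::gemm::GemmShape<1, 1, 1>,       // SIMT instruction shape
///     float, cutlass::layout::ColumnMajor,     // A
///     float, cutlass::layout::RowMajor,        // B
///     float, cutlass::layout::RowMajor,        // C
///     cutlass::arch::OpClassSimt,
///     3,                                       // pipeline stages
///     cutlass::arch::OpMultiplyAdd>;
/// \endcode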
/// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; // Shared memory layout using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. /// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = 
layout::ColumnMajor; // Shared memory layout using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator B using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static_assert(!((Shape::kK / 32) % LaneM) && !((Shape::kK / 32) % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<Shape::kK / 32, 0>, MatrixShape<0, Shape::kK / 32>, WarpCount::kK>; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. 
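///
/// Illustrative sketch only (the alias name, element type, and tile sizes are
/// example values): for row-major A, the thread map is transposed so that A is
/// stored column-major in shared memory, and a small K-dependent padding is
/// applied on the A side of the MmaPolicy.
///
/// \code
/// using SimtCoreRowRow = cutlass::gemm::threadblock::DefaultMmaCore<
///     cutlass::gemm::GemmShape<128, 128, 8>,   // threadblock tile
///     cutlass::gemm::GemmShape<32, 64, 8>,     // warp tile
///     cutlass::gemm::GemmShape<1, 1, 1>,       // SIMT instruction shape
///     float, cutlass::layout::RowMajor,        // A (transposed into shared memory)
///     float, cutlass::layout::RowMajor,        // B
///     float, cutlass::layout::RowMajor,        // C
///     cutlass::arch::OpClassSimt,
///     3,                                       // pipeline stages
///     cutlass::arch::OpMultiplyAdd>;
///
/// static_assert(cutlass::platform::is_same<SimtCoreRowRow::SmemLayoutA,
///                                          cutlass::layout::ColumnMajor>::value,
///               "Row-major A is transposed when staged through shared memory.");
/// \endcode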
/// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; // Shared memory layout using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int 
ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static_assert(!((Shape::kK / 32) % LaneM), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<Shape::kK / 32, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. /// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementC, LayoutC, arch::OpClassSimt, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// 
ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. /// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::AffineRank2RowMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, arch::OpClassSimt, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. 
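///
/// Illustrative sketch only (alias names, element type, and tile sizes are
/// example values): the affine-rank-2 specializations do not define their own
/// shared-memory organization; they forward it to the corresponding canonical
/// column/row-major core, so the resulting layouts, iterators, and policies are
/// identical to those of the canonical case.
///
/// \code
/// using AffineColCol = cutlass::gemm::threadblock::DefaultMmaCore<
///     cutlass::gemm::GemmShape<128, 128, 8>, cutlass::gemm::GemmShape<32, 64, 8>,
///     cutlass::gemm::GemmShape<1, 1, 1>,
///     float, cutlass::layout::AffineRank2ColumnMajor,   // A
///     float, cutlass::layout::AffineRank2ColumnMajor,   // B
///     float, cutlass::layout::RowMajor,                 // C
///     cutlass::arch::OpClassSimt, 3, cutlass::arch::OpMultiplyAdd>;
///
/// using CanonicalColCol = cutlass::gemm::threadblock::DefaultMmaCore<
///     cutlass::gemm::GemmShape<128, 128, 8>, cutlass::gemm::GemmShape<32, 64, 8>,
///     cutlass::gemm::GemmShape<1, 1, 1>,
///     float, cutlass::layout::ColumnMajor, float, cutlass::layout::ColumnMajor,
///     float, cutlass::layout::RowMajor,
///     cutlass::arch::OpClassSimt, 3, cutlass::arch::OpMultiplyAdd>;
///
/// static_assert(cutlass::platform::is_same<AffineColCol::SmemLayoutA,
///                                          CanonicalColCol::SmemLayoutA>::value,
///               "Affine-rank-2 cores reuse the canonical shared-memory layouts.");
/// \endcode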
/// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, arch::OpClassSimt, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. 
/// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::AffineRank2RowMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementC, LayoutC, arch::OpClassSimt, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm80.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm80.h", "repo_id": "cutlass", "token_count": 38063 }
41
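The affine rank-2 SIMT specializations above all delegate to the canonical ColumnMajor/RowMajor core, so the shared-memory layouts, thread maps and warp-level policy are inherited unchanged; only the global-memory tile access has to understand the affine strides. The following is a minimal compile-time sketch of selecting one of these specializations; the tile shapes, element types, stage count and operator are illustrative assumptions (a common SGEMM-style SIMT configuration), not values prescribed by the header above, and rely on the primary template's defaults for the remaining parameters.

#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"

// Illustrative tile sizes only (assumed, not taken from this file).
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape        = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;   // SIMT "instruction"

// Intended to pick the AffineRank2ColumnMajor (A) x AffineRank2RowMajor (B)
// SIMT specialization defined above; 3 requests the multistage pipeline depth.
using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape,
    float, cutlass::layout::AffineRank2ColumnMajor,
    float, cutlass::layout::AffineRank2RowMajor,
    float, cutlass::layout::RowMajor,
    cutlass::arch::OpClassSimt,
    3,
    cutlass::arch::OpMultiplyAdd>;

// Everything below is forwarded unchanged from the canonical
// ColumnMajor x RowMajor base core, as the specializations above show.
using SmemLayoutA = MmaCore::SmemLayoutA;
using SmemLayoutB = MmaCore::SmemLayoutB;
using MmaPolicy   = MmaCore::MmaPolicy;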
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped Blocked-Ell MMA. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/gemm/threadblock/mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class EllMmaMultistage : public MmaBase<Shape_, Policy_, Stages> { public: ///< Base class using Base = MmaBase<Shape_, Policy_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; ///< Policy describing tuning details using Policy = Policy_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; using EllIterator = typename cutlass::transform::threadblock::ell::Iterator; // // Dependent types // /// Fragment of accumulator tile using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on A operand static ComplexTransform const kTransformA = Operator::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = Operator::kTransformB; /// Internal structure exposed for introspection. 
struct Detail { static_assert(Base::kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE EllMmaMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } template<bool is_A_sparse, bool is_offset_constant> CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, EllIterator &ell_iter, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; 
CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_A.get(); bool is_valid = iterator_A.valid(); if (!is_A_sparse){ if (is_offset_constant){ auto ell_offset = ell_iter.get_offset_fast(); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ell_offset * sizeof(IteratorA::Element) / kSrcBytes; } else { int k_offset = iterator_A.get_k(); auto ell_offset = ell_iter.get_offset(k_offset); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += (ell_offset * sizeof(IteratorA::Element)) / kSrcBytes; } } cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, is_valid); ++iterator_A; } ++this->smem_iterator_A_; } } iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B.get(); bool is_valid = iterator_B.valid(); if (is_A_sparse){ if (is_offset_constant){ auto ell_offset = ell_iter.get_offset_fast(); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ell_offset * sizeof(IteratorB::Element) / kSrcBytes; } else { int k_offset = iterator_B.get_k(); auto ell_offset = ell_iter.get_offset(k_offset); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ( ell_offset * sizeof(IteratorB::Element)) / kSrcBytes; } } cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, is_valid); ++iterator_B; } ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate template<bool is_A_sparse, bool is_offset_constant> CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< initial value of accumulator FragmentC const &src_accum, EllIterator &ell_iterator ) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; auto gmem_ptr = iterator_A.get(); bool is_valid = iterator_A.valid(); if (!is_A_sparse){ if (is_offset_constant){ auto ell_offset = ell_iterator.get_offset_fast(); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ell_offset * sizeof(IteratorA::Element) / kSrcBytes; } else { int k_offset = iterator_A.get_k(); auto 
ell_offset = ell_iterator.get_offset(k_offset); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += (ell_offset * sizeof(IteratorA::Element)) / kSrcBytes; } } cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, is_valid); ++iterator_A; } ++this->smem_iterator_A_; } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; auto gmem_ptr = iterator_B.get(); bool is_valid = iterator_B.valid(); if (is_A_sparse){ if (is_offset_constant){ auto ell_offset = ell_iterator.get_offset_fast(); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ell_offset * sizeof(IteratorB::Element) / kSrcBytes; } else { int k_offset = iterator_B.get_k(); auto ell_offset = ell_iterator.get_offset(k_offset); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ( ell_offset * sizeof(IteratorB::Element)) / kSrcBytes; } } cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, is_valid); ++iterator_B; } ++this->smem_iterator_B_; } // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); ++ell_iterator; this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); if (is_A_sparse){ iterator_A.ell_add_mask(ell_iterator.get_blocksize()); } else { iterator_B.ell_add_mask(ell_iterator.get_blocksize()); } int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); // tf32x3 kernels use staging accumulation. warp_mma uses a temporary // accumulator and this temporary accumulator is added to the final // accumulator once in every mainloop iteration. 
plus<FragmentC> plus_accum; FragmentC tmp_accum; if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { tmp_accum.clear(); } // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % 2]); if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { warp_mma( tmp_accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], tmp_accum ); if (warp_mma_k == 0) { accum = plus_accum(accum, tmp_accum); tmp_accum.clear(); } } else { warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], accum ); } // Issue global->shared copies for the this stage if (warp_mma_k < Base::kWarpGemmIterations - 1) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; copy_tiles_and_advance<is_A_sparse, is_offset_constant>( iterator_A, iterator_B, ell_iterator, group_start_iteration_A, group_start_iteration_B); } if (warp_mma_k + 2 == Base::kWarpGemmIterations) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; copy_tiles_and_advance<is_A_sparse, is_offset_constant>( iterator_A, iterator_B, ell_iterator, group_start_iteration_A, group_start_iteration_B); // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. 
arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); ++ell_iterator; this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations) warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); } } if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { accum = plus_accum(accum, tmp_accum); } // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/threadblock/ell_mma_multistage.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/ell_mma_multistage.h", "repo_id": "cutlass", "token_count": 10488 }
42
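The mainloop above follows the usual Ampere cp.async staging discipline: issue kStages-1 predicated stage copies up front, fence each stage, wait until at most kStages-2 commits remain in flight before consuming a tile, and zero-fill rather than load anything that is masked off. The toy kernel below is a stripped-down sketch of that discipline using the same cutlass::arch primitives; the kernel, tile size and data type are illustrative assumptions and not part of CUTLASS, and the ELL offset lookup and warp-level MMA are omitted.

#include "cutlass/arch/memory.h"   // cp_async_zfill, cp_async_fence, cp_async_wait

// Toy multistage pipeline: kStages shared-memory slots, kStages - 1 stages kept
// in flight ahead of the consumer. Launch with one block of kThreads threads;
// each thread moves one float4 (16 bytes) per stage.
template <int kStages, int kThreads>
__global__ void multistage_pipeline_sketch(float4 const *gmem, float *out, int num_tiles) {
  __shared__ float4 smem[kStages][kThreads];
  int t = threadIdx.x;

  // Prologue: issue kStages - 1 complete stages of cp.async.
  for (int s = 0; s < kStages - 1; ++s) {
    bool guard = (s < num_tiles);
    cutlass::arch::cp_async_zfill<sizeof(float4), cutlass::arch::CacheOperation::Always>(
        &smem[s][t], gmem + s * kThreads + t, guard);
    cutlass::arch::cp_async_fence();            // stage boundary
  }

  float acc = 0.0f;
  int read_stage  = 0;
  int write_stage = kStages - 1;

  for (int tile = 0; tile < num_tiles; ++tile) {
    // At most kStages - 2 stages may still be in flight, so the stage about to
    // be read has landed; the barrier also orders earlier reads of the slot
    // that is refilled below.
    cutlass::arch::cp_async_wait<kStages - 2>();
    __syncthreads();

    float4 v = smem[read_stage][t];             // stand-in for the warp-level MMA
    acc += v.x + v.y + v.z + v.w;

    // Refill the slot that is now kStages - 1 tiles behind the consumer.
    int next = tile + kStages - 1;
    bool guard = (next < num_tiles);
    cutlass::arch::cp_async_zfill<sizeof(float4), cutlass::arch::CacheOperation::Always>(
        &smem[write_stage][t], gmem + next * kThreads + t, guard);
    cutlass::arch::cp_async_fence();

    read_stage  = (read_stage + 1) % kStages;
    write_stage = (write_stage + 1) % kStages;
  }

  // Drain all outstanding cp.async before exiting, as the epilogue above does.
  cutlass::arch::cp_async_fence();
  cutlass::arch::cp_async_wait<0>();
  __syncthreads();

  out[t] = acc;
}

EllMmaMultistage layers the ELL bookkeeping on top of this skeleton: before each cp_async_zfill it folds the block offset returned by the EllIterator into the global pointer and folds ell_offset >= 0 into the predicate, so pruned blocks are zero-filled exactly like out-of-bounds accesses.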
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing warp-level matrix multiply-accumulate operations targeting Tensor Cores. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/arch/mma_sm75.h" #include "cutlass/arch/mma_sm80.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/warp/mma_tensor_op_policy.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { //////////////////////////////////////////////////////////////////////////////// // Shuffle registers for layout conversion //////////////////////////////////////////////////////////////////////////////// template < /// Element type for the operand in registers for the mma.sync typename ElementMma_, /// Element type for the operand in shared memory for ldmatrix typename ElementLoad_, /// Number of mma.sync operations performed along rows or columns int NumMmaInstructions, /// Number of elements in warp fragment int NumElementsInWarpFragment, /// Number of elements in mma fragment int NumElementsInMmaFragment, /// Identifies A or B multiplicand Operand Operand_, /// typename Enable = void > struct FragmentShuffler { public: using ElementMma = ElementMma_; using ElementLoad = ElementLoad_; static int const kNumMmaInstructions = NumMmaInstructions; static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; static Operand const kOperand = Operand_; using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>; using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>; CUTLASS_DEVICE WarpFragment operator()(WarpFragment const &src) { return src; } }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for `mma.sync` on 16b (F16/BF16) and `ldmatrix` on 8b (S8/U8) /// for operand A multiplicand going through upcasting. 
template < /// Element type for the operand in registers for the mma.sync typename ElementMma_, /// Element type for the operand in shared memory for ldmatrix typename ElementLoad_, /// Number of mma.sync operations performed along rows or columns int NumMmaInstructions, /// Number of elements in warp fragment int NumElementsInWarpFragment, /// Number of elements in mma fragment int NumElementsInMmaFragment > struct FragmentShuffler <ElementMma_, ElementLoad_, NumMmaInstructions, NumElementsInWarpFragment, NumElementsInMmaFragment, Operand::kA, typename platform::enable_if<(sizeof_bits<ElementMma_>::value == 16) && (sizeof_bits<ElementLoad_>::value == 8)>::type> { public: using ElementMma = ElementMma_; using ElementLoad = ElementLoad_; static int const kNumMmaInstructions = NumMmaInstructions; static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; static Operand const kOperand = Operand::kA; using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>; using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>; static uint32_t const kSelectBytesEvenThread = 0x5410; static uint32_t const kSelectBytesOddThread = 0x7632; private: int delta_up_; int delta_down_; int odd_even_lane_id_; uint32_t byte_selector_; public: CUTLASS_DEVICE FragmentShuffler() { int lane_id = cutlass::arch::LaneId(); delta_up_ = (lane_id & 1) + ((lane_id & 2) >> 1); delta_down_ = 2 - delta_up_; odd_even_lane_id_ = static_cast<int>(lane_id & 1); byte_selector_ = odd_even_lane_id_ * kSelectBytesOddThread + (1 - odd_even_lane_id_) * kSelectBytesEvenThread; } CUTLASS_DEVICE WarpFragment operator()(WarpFragment const &src) { WarpFragment result; MmaFragment const* mma_frag_src_ptr = reinterpret_cast<MmaFragment const*>(&src); MmaFragment* mma_frag_dst_ptr = reinterpret_cast<MmaFragment*>(&result); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < kNumMmaInstructions; n++) { uint32_t const* src_ptr = reinterpret_cast<uint32_t const *>(&mma_frag_src_ptr[n]); uint32_t *dst_ptr = reinterpret_cast<uint32_t *>(&mma_frag_dst_ptr[n]); // Shuffle data within the warp, pull from other threads within the warp uint32_t tmp0 = __shfl_up_sync(0xFFFFFFFF, src_ptr[0], delta_up_); uint32_t tmp1 = __shfl_down_sync(0xFFFFFFFF, src_ptr[0], delta_down_); uint32_t tmp2 = __shfl_up_sync(0xFFFFFFFF, src_ptr[1], delta_up_); uint32_t tmp3 = __shfl_down_sync(0xFFFFFFFF, src_ptr[1], delta_down_); // Reorder the data within the 32-bit word (4x8b) required for mma.sync dst_ptr[0] = __byte_perm(tmp0, tmp2, byte_selector_); dst_ptr[1] = __byte_perm(tmp1, tmp3, byte_selector_); } return result; } }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for `mma.sync` on 16b (F16/BF16) and `ldmatrix` on 8b (S8/U8) /// for operand B multiplicand going through upcasting. 
template < /// Element type for the operand in registers for the mma.sync typename ElementMma_, /// Element type for the operand in shared memory for ldmatrix typename ElementLoad_, /// Number of mma.sync operations performed along rows or columns int NumMmaInstructions, /// Number of elements in warp fragment int NumElementsInWarpFragment, /// Number of elements in mma fragment int NumElementsInMmaFragment > struct FragmentShuffler <ElementMma_, ElementLoad_, NumMmaInstructions, NumElementsInWarpFragment, NumElementsInMmaFragment, Operand::kB, typename platform::enable_if<(sizeof_bits<ElementMma_>::value == 16) && (sizeof_bits<ElementLoad_>::value == 8)>::type> { public: using ElementMma = ElementMma_; using ElementLoad = ElementLoad_; static int const kNumMmaInstructions = NumMmaInstructions; static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; static Operand const kOperand = Operand::kB; using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>; using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>; static uint32_t const kSelectBytesEvenThread = 0x5410; static uint32_t const kSelectBytesOddThread = 0x7632; private: int delta_up_; int delta_down_; int odd_even_lane_id_; uint32_t byte_selector_; public: CUTLASS_DEVICE FragmentShuffler() { int lane_id = cutlass::arch::LaneId(); delta_up_ = (lane_id & 1) + ((lane_id & 2) >> 1); delta_down_ = 2 - delta_up_; odd_even_lane_id_ = static_cast<int>(lane_id & 1); byte_selector_ = odd_even_lane_id_ * kSelectBytesOddThread + (1 - odd_even_lane_id_) * kSelectBytesEvenThread; } CUTLASS_DEVICE WarpFragment operator()(WarpFragment const &src) { WarpFragment result; MmaFragment const* mma_frag_src_ptr = reinterpret_cast<MmaFragment const *>(&src); MmaFragment* mma_frag_dst_ptr = reinterpret_cast<MmaFragment *>(&result); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < kNumMmaInstructions; n++) { uint32_t const* src_ptr = reinterpret_cast<uint32_t const*>(&mma_frag_src_ptr[n]); uint32_t* dst_ptr = reinterpret_cast<uint32_t*>(&mma_frag_dst_ptr[n]); // Shuffle data within the warp, pull from other threads within the warp uint32_t tmp0 = __shfl_up_sync(0xFFFFFFFF, src_ptr[0], delta_up_); uint32_t tmp1 = __shfl_down_sync(0xFFFFFFFF, src_ptr[0], delta_down_); // Reorder the data within the 32-bit word (4x8b) required for mma.sync dst_ptr[0] = __byte_perm(tmp0, tmp1, byte_selector_); } return result; } }; //////////////////////////////////////////////////////////////////////////////// // Data type conversion //////////////////////////////////////////////////////////////////////////////// template < /// Destination type typename ElementDst_, /// Source type typename ElementSrc_, /// Number of elements int N, /// typename Enable = void> struct FragmentConverter { using ElementDst = ElementDst_; using ElementSrc = ElementSrc_; // Operand fragment registers in destination and source types using DestinationFragment = Array<ElementDst, N>; using SourceFragment = Array<ElementSrc, N>; FastNumericArrayConverter<ElementDst, ElementSrc, N> convert; CUTLASS_DEVICE DestinationFragment operator()(SourceFragment const &src) const { return convert(src); } }; //////////////////////////////////////////////////////////////////////////////// // Partial specialization for when Destination type is the *same* as // Source type template < /// Data type typename Element, /// Number of elements int N, /// typename Enable> struct FragmentConverter<Element, Element, N, Enable> { 
using DestinationFragment = Array<Element, N>; using SourceFragment = Array<Element, N>; CUTLASS_DEVICE DestinationFragment operator()(SourceFragment const &src) const { return src; } }; } // namespace detail /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) typename Policy_, /// Number of partitions along K dimension int PartitionsK_ = 1, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor = false, /// Used for partial specialization typename Enable = bool > class MmaMixedInputTensorOp { public: /// Shape of warp-level matrix operation (concept: GemmShape) using Shape = Shape_; /// Data type of multiplicand A using ElementA = ElementA_; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of multiplicand B using ElementB = ElementB_; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of accumulator matrix C using ElementC = ElementC_; /// Layout of accumulator matrix C using LayoutC = LayoutC_; /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) using Policy = Policy_; /// Underlying matrix multiply operator (concept: arch::Mma) using ArchMmaOperator = typename Policy::Operator; /// Underlying arch::Mma instruction datatype for A operand using ElementAMma = typename ArchMmaOperator::ElementA; /// Underlying arch::Mma instruction datatype for B operand using ElementBMma = typename ArchMmaOperator::ElementB; /// Underlying arch::Mma instruction datatype for C operand using MmaElementC = typename ArchMmaOperator::ElementC; /// Indicates math operator using MathOperator = typename ArchMmaOperator::Operator; /// Architecture tag from underlying instruction using ArchTag = typename ArchMmaOperator::ArchTag; /// Indicates class of matrix operator using OperatorClass = arch::OpClassTensorOp; /// Shape of underlying instruction using InstructionShape = typename ArchMmaOperator::Shape; /// Complex transform on A operand static ComplexTransform const kTransformA = ComplexTransform::kNone; /// Complex transform on B operand static ComplexTransform const kTransformB = ComplexTransform::kNone; /// Number of threads participating in warp-level matrix product static int const kThreadCount = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// // static int const kLoadShapeK = InstructionShape::kK * // (sizeof_bits<ElementAMma>::value / sizeof_bits<ElementB>::value); public: /// Iterates over the A operand in Shared Memory using IteratorA = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA, MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>, Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; /// Storage for A tile in registers (loaded from Shared Memory) using FragmentA = typename IteratorA::Fragment; /// Storage for transformed A tile in registers (for use in Mma instruction) using TransformedFragmentA = 
Array<ElementAMma, FragmentA::kElements>; /// Underlying arch::Mma instruction operand fragement for matrix A using MmaOperandA = typename ArchMmaOperator::FragmentA; /// Iterates over the B operand in Shared Memory using IteratorB = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB, MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>, Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; /// Storage for B tile in registers (loaded from Shared Memory) using FragmentB = typename IteratorB::Fragment; /// Storage for transformed B tile in registers (for use in Mma instruction) using TransformedFragmentB = Array<ElementBMma, FragmentB::kElements>; /// Underlying arch::Mma instruction operand fragement for matrix B using MmaOperandB = typename ArchMmaOperator::FragmentB; /// Iterates over the C operand in memory using IteratorC = MmaTensorOpAccumulatorTileIterator< MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC, typename ArchMmaOperator::Shape, typename Policy::OpDelta>; /// Storage for C tile using FragmentC = typename IteratorC::Fragment; /// Underlying arch::Mma instruction operand fragement for matrix C using MmaOperandC = typename ArchMmaOperator::FragmentC; /// Number of mma operations performed using MmaIterations = MatrixShape< (Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM, (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN >; public: /// Underlying matrix multiply operator (concept: arch::Mma) ArchMmaOperator mma; public: // // Methods // /// Ctor CUTLASS_DEVICE MmaMixedInputTensorOp() {} /// Performs a warp-level matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &D, TransformedFragmentA const &A, TransformedFragmentB const &B, FragmentC const &C ) const { D = C; MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A); MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B); MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D); CUTLASS_PRAGMA_UNROLL for (int m = 0; m < MmaIterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < MmaIterations::kColumn; ++n) { int n_serpentine = ((m % 2) ? 
(MmaIterations::kColumn - 1 - n) : n); if (AccumulatorsInRowMajor) { // matrix B is reordered mma( ptr_D[n_serpentine + m * MmaIterations::kColumn], ptr_A[m], ptr_B[n_serpentine], ptr_D[n_serpentine + m * MmaIterations::kColumn]); } else { mma(ptr_D[m + n_serpentine * MmaIterations::kRow], ptr_A[m], ptr_B[n_serpentine], ptr_D[m + n_serpentine * MmaIterations::kRow]); } } } } /// Transform the operand warp fragment register to the required data types and layout /// for the `cutlass::arch::Mma` CUTLASS_DEVICE void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, FragmentA const &A, FragmentB const &B) const { // Shuffle data within warp to obtain the mma.sync operand layout detail::FragmentShuffler<ElementBMma, ElementB, MmaIterations::kColumn, FragmentB::kElements, MmaOperandB::kElements, Operand::kB> shuffler_B; FragmentB tmp_B; tmp_B = shuffler_B(B); // Convert the B operand to the Mma Instruction operand type detail::FragmentConverter<ElementBMma, ElementB, FragmentB::kElements> convert_B; dst_B = convert_B(tmp_B); FragmentA tmp_A; Array<ElementA, FragmentA::kElements / 2> * ptr_tmp_A = reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> *>(&tmp_A); Array<ElementAMma, FragmentA::kElements / 2> * ptr_dst_A = reinterpret_cast<Array<ElementAMma, FragmentA::kElements / 2> *>(&dst_A); // Shuffle data within warp to obtain the mma.sync operand layout detail::FragmentShuffler<ElementAMma, ElementA, MmaIterations::kRow, FragmentA::kElements, MmaOperandA::kElements, Operand::kA> shuffler_A; // Convert the A operand to the Mma Instruction operand type detail::FragmentConverter<ElementAMma, ElementA, FragmentA::kElements / 2> convert_A; tmp_A = shuffler_A(A); ptr_dst_A[0] = convert_A(ptr_tmp_A[0]); ptr_dst_A[1] = convert_A(ptr_tmp_A[1]); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h", "repo_id": "cutlass", "token_count": 7131 }
43
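FragmentShuffler's constructor derives a per-lane shuffle distance and a byte selector; after the __shfl_up_sync / __shfl_down_sync exchange, __byte_perm recombines either the low or the high 16 bits of the two gathered words, depending on the lane's parity. The standalone demo below only illustrates what the two selectors 0x5410 and 0x7632 do; it is not CUTLASS code and the input words are arbitrary.

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

// __byte_perm(lo, hi, sel) forms its result by picking bytes out of the
// 8-byte value {hi, lo}: nibble i of `sel` names the byte that lands in
// result byte i, with indices 0-3 taken from `lo` and 4-7 from `hi`.
__global__ void byte_perm_demo() {
  uint32_t lo = 0x33221100u;   // bytes 0x00, 0x11, 0x22, 0x33 (low to high)
  uint32_t hi = 0x77665544u;   // bytes 0x44, 0x55, 0x66, 0x77

  // kSelectBytesEvenThread: keep the low 16 bits of each word -> 0x55441100
  uint32_t even = __byte_perm(lo, hi, 0x5410);
  // kSelectBytesOddThread: keep the high 16 bits of each word -> 0x77663322
  uint32_t odd  = __byte_perm(lo, hi, 0x7632);

  if (threadIdx.x == 0) {
    printf("0x5410 -> 0x%08x, 0x7632 -> 0x%08x\n", even, odd);
  }
}

int main() {
  byte_perm_demo<<<1, 32>>>();
  cudaDeviceSynchronize();
  return 0;
}

In the shuffler itself the two inputs of each __byte_perm are words gathered from neighbouring lanes via __shfl_up_sync / __shfl_down_sync, and the lane's parity chooses which selector is applied, which is how the 8-bit source fragment is rearranged into the register layout the 16-bit mma.sync expects after upcasting.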
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/wmma_array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Operand identity (A or B) Operand Operand, /// Data type of operand typename Element_, /// Layout of operand typename Layout_, /// Delta between *MMA operations (in units of *WMMA operations, concept:MatrixShape) int OpDelta_, /// Number of threads participating in one matrix operation int Threads, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaMultiplicandTileIterator; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread WMMA operation. /// It uses nvcuda::wmma::load_matrix_sync to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Layout of operand typename Layout_, /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) int OpDelta_, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaMultiplicandTileIterator< Shape_, Operand::kA, Element_, Layout_, OpDelta_, 32, Policy_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kA; /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Delta between *WMMA operations static int const kOpDelta = OpDelta_; /// Wmma Operator information and operation delta using Policy = Policy_; // // Derived quantities // /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Stride Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Native Wmma shape for operand A (concept MatrixShape) using WmmaShape = MatrixShape< Policy::Operator::Shape::kM, Policy::Operator::Shape::kK >; /// Map cutlass dataype to nvcuda::wmma datatype using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type; /// Shape of individual WMMA load / stores for operand A using Iterations = MatrixShape< Shape::kRow / WmmaShape::kRow, 1 >; /// Fragment object holding a warps part using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentA, Iterations::kCount>; ////////////////////////////////////////////////////////////////////////////////////////////////////// /// statically assert this specialization ///////////////////////////////////////////////////////////////////////////////////////////////////// /// This iterator is specalized for Operand A static_assert(kOperand == Operand::kA, "MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for A operands to warp-level Mma."); /// Supported memory layouts static_assert( platform::is_same<cutlass::layout::RowMajor, Layout>::value || platform::is_same<cutlass::layout::ColumnMajor, Layout>::value, "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); ///////////////////////////////////////////////////////////////////////////////////////////////////// private: /// Shared memory base pointers - not advanced char const *pointer_; /// Byte offset into shared memory - advanced Index byte_offset_; /// Stride in units of number of elements StrideIndex stride_; /// Layout of shared memory Layout layout_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator( TensorRef const &ref, int lane_id ): pointer_(reinterpret_cast<char const*>(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += (offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { Index elements_offset = layout_({tile_offset.row() * Shape::kRow, tile_offset.column() * WmmaShape::kColumn}); byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator++() { Index elements_offset = layout_({0, WmmaShape::kColumn}); byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator--() { Index elements_offset = layout_({0, WmmaShape::kColumn}); byte_offset_ -= (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load_with_byte_offset(Fragment &frag, Index byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kColumn; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { Index load_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8; const WmmaDataType *ptr = reinterpret_cast<const WmmaDataType *>(pointer_ + byte_offset_ + load_byte_offset + byte_offset); nvcuda::wmma::load_matrix_sync(frag[m], ptr, stride_); } } } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_byte_offset(Fragment const &frag, Index byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kColumn; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { Index store_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8; WmmaDataType *ptr = reinterpret_cast<WmmaDataType *>(pointer_ + byte_offset_ + store_byte_offset + byte_offset); nvcuda::wmma::store_matrix_sync(ptr, frag[m], stride_); } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread WMMA operation. /// It uses nvcuda::wmma::load_matrix_sync to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Layout of operand typename Layout_, /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) int OpDelta_, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaMultiplicandTileIterator< Shape_, Operand::kB, Element_, Layout_, OpDelta_, 32, Policy_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kB; /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Delta between *WMMA operations static int const kOpDelta = OpDelta_; /// Wmma Operator information and operation delta using Policy = Policy_; // // Derived quantities // /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Stride Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Native Wmma shape (concept MatrixShape) using WmmaShape = MatrixShape< Policy::Operator::Shape::kK, Policy::Operator::Shape::kN >; /// Map cutlass dataype to nvcuda::wmma datatype using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type; /// Shape of individual WMMA load / stores for operand B using Iterations = MatrixShape< 1, Shape::kColumn / WmmaShape::kColumn >; /// Fragment object holding a warps part using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentB, Iterations::kCount>; 
////////////////////////////////////////////////////////////////////////////////////////////////////// /// statically asserts this specialization ///////////////////////////////////////////////////////////////////////////////////////////////////// /// This iterator is specalized for Operand B static_assert(kOperand == Operand::kB, "MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for B operands to warp-level Mma."); /// Supported memory layouts static_assert( platform::is_same<cutlass::layout::RowMajor, Layout>::value || platform::is_same<cutlass::layout::ColumnMajor, Layout>::value, "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); /// Not working on this feature at the moment. static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); ///////////////////////////////////////////////////////////////////////////////////////////////////// private: /// Shared memory base pointers - not advanced char const *pointer_; /// Byte offset into shared memory - advanced Index byte_offset_; /// Stride in units of number of elements StrideIndex stride_; /// Layout of shared memory Layout layout_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator( TensorRef const &ref, int lane_id ): pointer_(reinterpret_cast<char const*>(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += (offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { Index elements_offset = layout_({tile_offset.row() * WmmaShape::kRow, tile_offset.column() * Shape::kColumn}); byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator++() { Index elements_offset = layout_({WmmaShape::kRow, 0}); byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator--() { Index elements_offset = layout_({WmmaShape::kRow, 0}); byte_offset_ -= (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load_with_byte_offset(Fragment &frag, Index byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kRow; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { Index load_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8; const WmmaDataType *ptr = reinterpret_cast<const WmmaDataType *>(pointer_ + byte_offset_ + load_byte_offset + byte_offset); nvcuda::wmma::load_matrix_sync(frag[n], ptr, stride_); } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_byte_offset(Fragment const &frag, Index byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kRow; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { Index store_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8; WmmaDataType *ptr = reinterpret_cast<WmmaDataType *>(pointer_ + byte_offset_ + store_byte_offset + byte_offset); nvcuda::wmma::store_matrix_sync(ptr, frag[n], stride_); } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Layout of operand in memory typename Layout_, /// Interval between adjacent *WMMA instructions (in units of WMMA instructions, concept: MatrixShape) typename OpDelta_, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaAccumulatorTileIterator; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread WMMA operation. /// It uses nvcuda::wmma::store_matrix_sync to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept | /// WriteableRandomAccessContiguousTileIteratorConcept /// //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Layout of operand in memory typename Layout_, /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) typename OpDelta_, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaAccumulatorTileIterator { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Wmma Operator information and operation delta using Policy = Policy_; // // Derived quantities // /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Native Wmma shape (concept MatrixShape) using WmmaShape = MatrixShape< Policy::Operator::Shape::kM, Policy::Operator::Shape::kN >; /// Map cutlass dataype to nvcuda::wmma datatype using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type; /// Map cutlass::layout to nvuda::wmma::layout_t enum static nvcuda::wmma::layout_t const WmmaLayout = cutlass::arch::CutlassToWmmaLayout<Layout>::value; /// Shape of individual WMMA load / stores for accumulator using Iterations = MatrixShape< Shape::kRow / WmmaShape::kRow, Shape::kColumn / WmmaShape::kColumn >; /// Fragment object holding a thread's part of a tile using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentC, Iterations::kCount>; ////////////////////////////////////////////////////////////////////////////////////////////////////// /// statically asserts this specialization ///////////////////////////////////////////////////////////////////////////////////////////////////// /// Supported layouts static_assert( platform::is_same<cutlass::layout::RowMajor, Layout>::value || platform::is_same<cutlass::layout::ColumnMajor, Layout>::value, "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); private: /// Internal reference cutlass::TensorRef<Element, Layout> ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpWmmaAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpWmmaAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpWmmaAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpWmmaAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset({tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE 
MmaTensorOpWmmaAccumulatorTileIterator & operator++() { ref_.add_coord_offset({Shape::kRow, 0}); return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpWmmaAccumulatorTileIterator & operator--() { ref_.add_coord_offset({-Shape::kRow, 0}); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { const WmmaDataType * ptr = reinterpret_cast<const WmmaDataType*> (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset); nvcuda::wmma::load_matrix_sync(frag[m * Iterations::kColumn + n], ptr, ref_.stride()[0], WmmaLayout); } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { WmmaDataType * ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset); nvcuda::wmma::store_matrix_sync(ptr, frag[m * Iterations::kColumn + n], ref_.stride()[0], WmmaLayout); } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; } // namespace warp } // namespace gemm } // namespace cutlass //////////////////////////////////////////////////////////////////////////////// #endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h", "repo_id": "cutlass", "token_count": 8607 }
44
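The warp-level WMMA tile iterators above are thin wrappers over the nvcuda::wmma intrinsics: each load() reduces to a load_matrix_sync at a byte offset computed from the shared-memory layout, and each accumulator store() to a store_matrix_sync with the layout enum from CutlassToWmmaLayout. The standalone CUDA sketch below (not part of CUTLASS; the kernel name and pointer arguments are illustrative, compile for sm_70 or newer) shows the raw pattern those iterators encapsulate for a single 16x16x16 tile.

#include <mma.h>
#include <cuda_fp16.h>

using namespace nvcuda;

// One warp computes D = A * B for a single 16x16x16 tile.
__global__ void wmma_tile_16x16x16(half const *A, half const *B, float *D,
                                   int lda, int ldb, int ldd) {
  // Fragments play the role of the iterators' Fragment objects.
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag;

  wmma::fill_fragment(c_frag, 0.0f);

  // load_matrix_sync is what MmaTensorOpWmmaMultiplicandTileIterator::load() issues;
  // the leading-dimension argument is the stride the iterator captures from TensorRef.
  wmma::load_matrix_sync(a_frag, A, lda);
  wmma::load_matrix_sync(b_frag, B, ldb);

  // Warp-synchronous multiply-accumulate.
  wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);

  // store_matrix_sync is what MmaTensorOpWmmaAccumulatorTileIterator::store() issues;
  // the layout enum mirrors CutlassToWmmaLayout<Layout>::value.
  wmma::store_matrix_sync(D, c_frag, ldd, wmma::mem_row_major);
}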
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines layout functions used by TensorRef and derived classes for pitch-linear memory. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/coord.h" #include "cutlass/pitch_linear_coord.h" namespace cutlass { namespace layout { template <int Contiguous, int Strided> using PitchLinearShape = cutlass::PitchLinearShape < Contiguous, Strided >; using PitchLinearCoord = PitchLinearCoord; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Mapping function for pitch-linear memory class PitchLinear { public: /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, LongIndex>; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE PitchLinear(LongIndex ldm = 0): stride_(ldm) { } /// Constructor CUTLASS_HOST_DEVICE PitchLinear(Stride _stride): stride_(_stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static PitchLinear packed(TensorCoord const &extent) { return PitchLinear(extent.contiguous()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return LongIndex(coord.contiguous()) + LongIndex(coord.strided()) * LongIndex(stride_[0]); } /// Returns the logical coordinate given an offset. 
CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex index) const { return make_Coord( TensorCoord::Index(index % stride_[0]), TensorCoord::Index(index / stride_[0]) ); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE LongIndex stride(int rank) const { return stride_[rank]; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE LongIndex & stride(int rank) { return stride_[rank]; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent.strided() * stride_[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace layout } // namespace cutlass
cutlass/include/cutlass/layout/pitch_linear.h/0
{ "file_path": "cutlass/include/cutlass/layout/pitch_linear.h", "repo_id": "cutlass", "token_count": 1370 }
45
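As a hedged illustration of the PitchLinear mapping defined above, the host-side sketch below (not part of CUTLASS; the extent and stride values are arbitrary, and the two-argument PitchLinearCoord constructor is assumed from cutlass/pitch_linear_coord.h) exercises operator(), inverse(), packed() and capacity().

#include <cassert>
#include "cutlass/layout/pitch_linear.h"

int main() {
  using cutlass::layout::PitchLinear;
  using Coord = cutlass::layout::PitchLinearCoord;

  // A 128 (contiguous) x 64 (strided) tensor padded to a leading dimension of 136.
  PitchLinear layout(136);

  // offset = contiguous + strided * stride = 3 + 2 * 136
  auto offset = layout(Coord(3, 2));
  assert(offset == 275);

  // inverse() recovers the logical coordinate from a linear offset.
  auto coord = layout.inverse(offset);
  assert(coord.contiguous() == 3 && coord.strided() == 2);

  // packed() builds a layout whose stride equals the contiguous extent.
  PitchLinear packed = PitchLinear::packed(Coord(128, 64));
  assert(packed.stride(0) == 128);

  // capacity() reports how many elements are needed to back a tensor of this extent.
  assert(layout.capacity(Coord(128, 64)) == 64 * 136);

  return 0;
}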
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines container classes and iterators for managing a statically sized vector of boolean predicates. */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #include <cuda/std/cstdint> #else #include <assert.h> #include <stdint.h> #endif #include "cutlass/cutlass.h" #include "cutlass/platform/platform.h" namespace cutlass { //////////////////////////////////////////////////////////////////////////////////////////////////// /*!@defgroup predicate_vector_concept Predicate Vector Concept @{ Implementations of \ref predicate_vector_concept contain an ordered set of boolean predicates which may be used as conditionals in other device-side operations. Both random access and iterators offering sequential access are provided. @par Predicate Vector A \ref predicate_vector_concept satisfies the following expressions - <b>at(int idx)</b> - returns the value of the indexed predicate - <b>set(int idx, bool value)</b> - sets the value of the indexed predicate - <b>begin()</b> - returns a \ref predicate_iterator_concept pointing to the first predicate @} */ //////////////////////////////////////////////////////////////////////////////////////////////////// /*!@defgroup predicate_iterator_concept Predicate Iterator Concept @{ Implementations of \ref predicate_iterator_concept enables accessing and traversing elements of a bit vector. 
@par Const Predicate Iterator A const \ref predicate_iterator_concept satisfies the following expressions - <b>++it</b> increments the iterator to the next predicate - <b>*it</b> returns the value of the currently pointed-to predicate @par Mutable Predicate Iterator A \ref predicate_iterator_concept that is non-const <b>also</b> satisfies the following expressions - <b>it.set(bool value)</b> sets the value of the currently pointed-to predicate @} */ //////////////////////////////////////////////////////////////////////////////////////////////////// /*!@defgroup predicate_tile_adapter Predicate Tile Adapter Concept @{ Implementations of \ref predicate_tile_adapter provide a mapping between a the elements of a \ref tile_traits_concept and a \ref predicate_vector_concept. @par Predicate Tile Adapter A \ref predicate_tile_adapter satisfies the following expressions - <b>at(int d, int h, int w, int c)</b> - returns the value of a predicate corresponding to the access (d, h, w, c) within the tile. @} */ //////////////////////////////////////////////////////////////////////////////////////////////////// /// Statically sized array of bits implementing @concept{predicate_vector_concept}. template < /// Number of predicates contained in predicate vector int kPredicates_, /// Number of predicates contained in each byte of internal storage int kPredicatesPerByte_ = 4, /// Location of first predicate within byte of internal storage int kPredicateStart_ = 0> struct PredicateVector { /// Number of bits stored by the PredicateVector static constexpr int kPredicates = kPredicates_; /// Number of bits stored within each byte of the predicate bit vector static constexpr int kPredicatesPerByte = kPredicatesPerByte_; /// First bit within each byte containing predicates static constexpr int kPredicateStart = kPredicateStart_; // Make sure no one tries to put more than 8 bits in a byte :) static_assert(kPredicatesPerByte <= 8, "kPredicatesPerByte must fit within an actual byte"); // Make sure the "offsetted" bits fit in one byte. static_assert(kPredicateStart + kPredicatesPerByte <= 8, "The offsetted predicates must fit within an actual byte."); /// Storage type of individual elements typedef uint32_t Storage; /// Number of bytes needed static constexpr int kBytes = (kPredicates + kPredicatesPerByte - 1) / kPredicatesPerByte; /// Number of storage elements needed static constexpr int kWordCount = (kBytes + int(sizeof(Storage)) - 1) / int(sizeof(Storage)); /// The byte mask corresponding to predicates static constexpr Storage kByteMask = (((1 << kPredicatesPerByte) - 1) << kPredicateStart); private: // // Data members // /// Words of bit vector Storage storageData[kWordCount]; // // Methods // /// Computes the word and bit corresponding to a logical predicate index CUTLASS_HOST_DEVICE void computeStorageOffset(int &word, int &bit, int idx) const { CUTLASS_ASSERT(idx < kPredicates); int byte = (idx / kPredicatesPerByte); int bit_offset = (idx % kPredicatesPerByte); word = byte / sizeof(Storage); int byte_offset = (byte % sizeof(Storage)); bit = byte_offset * 8 + bit_offset + kPredicateStart; } /// Returns word mask. CUTLASS_HOST_DEVICE static constexpr bool computeWordMask() { Storage mask(0); CUTLASS_PRAGMA_UNROLL for (size_t byte = 0; byte < sizeof(Storage); ++byte) { mask |= (kByteMask << (byte * 8)); } return mask; } /// Returns mask of last word. 
CUTLASS_HOST_DEVICE static constexpr bool computeLastWordMask() { Storage mask(0); CUTLASS_PRAGMA_UNROLL for (int byte = 0; byte < kBytes % sizeof(Storage); ++byte) { mask |= (kByteMask << (byte * 8)); } return mask; } /// Accesses a given word with optional assertions CUTLASS_HOST_DEVICE Storage &storage(int word) { CUTLASS_ASSERT(word < kWordCount); return storageData[word]; } /// Accesses a given word with optional assertions CUTLASS_HOST_DEVICE Storage const &storage(int word) const { CUTLASS_ASSERT(word < kWordCount); return storageData[word]; } public: // // Iterator // /** * @brief An iterator implementing \ref predicate_iterator_concept enabling sequential * read and write access to predicates. * @concept{predicate_iterator_concept} */ class Iterator { /// Reference to PredicateVector instance PredicateVector &vec_; /// Index into PredicateVector int bit_; public: /// Copy constructor CUTLASS_HOST_DEVICE Iterator(Iterator const &it) : vec_(it.vec_), bit_(it.bit_) {} /// Constructs an iterator from a PredicateVector CUTLASS_HOST_DEVICE Iterator(PredicateVector &vec, int _start = 0) : vec_(vec), bit_(_start) {} /// Pre-increment CUTLASS_HOST_DEVICE Iterator &operator++() { ++bit_; return *this; } /// Increment CUTLASS_HOST_DEVICE Iterator &operator+=(int offset) { bit_ += offset; return *this; } /// Pre-decrement CUTLASS_HOST_DEVICE Iterator &operator--() { --bit_; return *this; } /// Decrement CUTLASS_HOST_DEVICE Iterator &operator-=(int offset) { bit_ -= offset; return *this; } /// Post-increment CUTLASS_HOST_DEVICE Iterator operator++(int) { Iterator ret(*this); ret.bit_++; return ret; } /// Post-decrement CUTLASS_HOST_DEVICE Iterator operator--(int) { Iterator ret(*this); ret.bit_--; return ret; } /// Iterator advances by some amount CUTLASS_HOST_DEVICE Iterator operator+(int offset) { Iterator ret(*this); ret.bit_ += offset; return ret; } /// Iterator recedes by some amount CUTLASS_HOST_DEVICE Iterator operator-(int offset) { ConstIterator ret(*this); ret.bit_ -= offset; return ret; } /// Returns true if iterators point to the same bit CUTLASS_HOST_DEVICE bool operator==(Iterator const &it) const { return bit_ == it.bit_; } /// Returns false if iterators point to the same bit CUTLASS_HOST_DEVICE bool operator!=(Iterator const &it) const { return bit_ != it.bit_; } /// Gets the bit at the pointed to location CUTLASS_HOST_DEVICE bool get() { return vec_.at(bit_); } /// Gets the bit at the pointed to location CUTLASS_HOST_DEVICE bool at() const { return vec_.at(bit_); } /// Dereferences iterator CUTLASS_HOST_DEVICE bool operator*() const { return at(); } /// Sets the bit at the pointed to location CUTLASS_HOST_DEVICE void set(bool value = true) { vec_.set(bit_, value); } }; /** * @brief An iterator implementing \ref predicate_iterator_concept enabling sequential * read and write access to predicates. 
* @concept{predicate_iterator_concept} */ class ConstIterator { /// Reference to PredicateVector instance PredicateVector const &vec_; /// Index into PredicateVector int bit_; public: /// Copy constructor CUTLASS_HOST_DEVICE ConstIterator(ConstIterator const &it) : vec_(it.vec_), bit_(it.bit_) {} /// Constructs an iterator from a PredicateVector CUTLASS_HOST_DEVICE ConstIterator(PredicateVector const &vec, int _start = 0) : vec_(vec), bit_(_start) {} /// Pre-increment CUTLASS_HOST_DEVICE ConstIterator &operator++() { ++bit_; return *this; } /// Increment CUTLASS_HOST_DEVICE ConstIterator &operator+=(int offset) { bit_ += offset; return *this; } /// Pre-decrement CUTLASS_HOST_DEVICE ConstIterator &operator--() { --bit_; return *this; } /// Decrement CUTLASS_HOST_DEVICE ConstIterator &operator-=(int offset) { bit_ -= offset; return *this; } /// Post-increment CUTLASS_HOST_DEVICE ConstIterator operator++(int) { ConstIterator ret(*this); ret.bit_++; return ret; } /// Post-decrement CUTLASS_HOST_DEVICE ConstIterator operator--(int) { ConstIterator ret(*this); ret.bit_--; return ret; } /// Iterator advances by some amount CUTLASS_HOST_DEVICE ConstIterator operator+(int offset) { ConstIterator ret(*this); ret.bit_ += offset; return ret; } /// Iterator recedes by some amount CUTLASS_HOST_DEVICE ConstIterator operator-(int offset) { ConstIterator ret(*this); ret.bit_ -= offset; return ret; } /// Returns true if iterators point to the same bit CUTLASS_HOST_DEVICE bool operator==(ConstIterator const &it) const { return bit_ == it.bit_; } /// Returns false if iterators point to the same bit CUTLASS_HOST_DEVICE bool operator!=(ConstIterator const &it) const { return bit_ != it.bit_; } /// Gets the bit at the pointed to location CUTLASS_HOST_DEVICE bool get() { return vec_.at(bit_); } /// Gets the bit at the pointed to location CUTLASS_HOST_DEVICE bool at() const { return vec_.at(bit_); } /// Dereferences iterator CUTLASS_HOST_DEVICE bool operator*() const { return at(); } }; /// Iterator that always returns true struct TrivialIterator { /// Constructor CUTLASS_HOST_DEVICE TrivialIterator() {} /// Copy constructor CUTLASS_HOST_DEVICE TrivialIterator(Iterator const &it) {} /// Constructs an iterator from a PredicateVector CUTLASS_HOST_DEVICE TrivialIterator(PredicateVector const &_vec) {} /// Pre-increment CUTLASS_HOST_DEVICE TrivialIterator &operator++() { return *this; } /// Post-increment CUTLASS_HOST_DEVICE TrivialIterator operator++(int) { return *this; } /// Dereferences iterator CUTLASS_HOST_DEVICE bool operator*() const { return true; } }; public: // // Methods // /// Initialize the predicate vector CUTLASS_HOST_DEVICE PredicateVector(bool value = true) { fill(value); } /// Fills all predicates with a given value CUTLASS_HOST_DEVICE void fill(bool value = true) { Storage item = (value ? ~Storage(0) : Storage(0)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = item; } } /// Clears all predicates CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = 0; } } /// Sets all predicates to true CUTLASS_HOST_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = ~Storage(0); } } /// Accesses a bit within the predicate vector. CUTLASS_HOST_DEVICE bool operator[](int idx) const { return at(idx); } /// Accesses a bit within the predicate vector. 
CUTLASS_HOST_DEVICE bool at(int idx) const { int bit, word; computeStorageOffset(word, bit, idx); return ((storage(word) >> bit) & 1); } /// Set a bit within the predicate vector. CUTLASS_HOST_DEVICE void set(int idx, bool value = true) { int bit, word; computeStorageOffset(word, bit, idx); Storage disable_mask = (~(Storage(1) << bit)); Storage enable_mask = (Storage(value) << bit); storage(word) = ((storage(word) & disable_mask) | enable_mask); } /// Computes the intersection of two identical predicate vectors. CUTLASS_HOST_DEVICE PredicateVector &operator&=(PredicateVector const &predicates) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = (storage(i) & predicates.storage(i)); } return *this; } /// Computes the union of two identical predicate vectors. CUTLASS_HOST_DEVICE PredicateVector &operator|=(PredicateVector const &predicates) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = (storage(i) | predicates.storage(i)); } return *this; } /// Returns true if entire predicate array is zero. CUTLASS_HOST_DEVICE bool is_zero() const { constexpr Storage mask = computeWordMask(); Storage result = 0; CUTLASS_PRAGMA_UNROLL for (int word = 0; word < kWordCount - 1; ++word) { result |= (storage(word) & mask); } constexpr Storage last_word_mask = computeLastWordMask(); result |= (storage(kWordCount - 1) & last_word_mask); return result == 0; } /// Returns an iterator to the start of the bit vector CUTLASS_DEVICE Iterator begin() { return Iterator(*this); } /// Returns an iterator CUTLASS_DEVICE Iterator end() { return Iterator(*this, kPredicates); } /// Returns a ConstIterator CUTLASS_DEVICE ConstIterator const_begin() const { return ConstIterator(*this); } /// Returns a ConstIterator CUTLASS_DEVICE ConstIterator const_end() const { return ConstIterator(*this, kPredicates); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
cutlass/include/cutlass/predicate_vector.h/0
{ "file_path": "cutlass/include/cutlass/predicate_vector.h", "repo_id": "cutlass", "token_count": 5505 }
46
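A minimal device-side sketch of how PredicateVector is typically used (not part of CUTLASS; the kernel, tile size, and indexing are hypothetical): guard conditions are computed once into the bit vector and then reused across an unrolled access loop, which is the pattern predicated tile iterators rely on.

#include "cutlass/predicate_vector.h"

__global__ void guarded_copy(float const *src, float *dst, int n) {
  constexpr int kAccesses = 8;

  // Default construction fills the vector with 'true'.
  cutlass::PredicateVector<kAccesses> predicates;

  int base = (blockIdx.x * blockDim.x + threadIdx.x) * kAccesses;

  // Compute the guards once...
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < kAccesses; ++i) {
    predicates.set(i, base + i < n);
  }

  // ...and reuse them for the accesses; operator[] reads a single predicate.
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < kAccesses; ++i) {
    if (predicates[i]) {
      dst[base + i] = src[base + i];
    }
  }
}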
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Provides a mechanism for packing and unpacking elements smaller than one byte */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/integer_subbyte.h" #include "cutlass/fast_math.h" namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// /// This class provides a mechanism for packing and unpacking elements smaller than one byte. It /// assumes these sub-byte elements are packed in a traditional C++ numeric type. /// /// The intended application is to provide a mechanism to indirectly reference elements in /// memory or Array<> objects whose addresses cannot otherwise be taken since they are smaller /// than one byte. /// /// Supports basic pointer arithmetic: /// /// Example: /// /// int4b_t *ptr = ...; /// /// SubbyteReference<int4b_t> ref = ptr; /// ref += 15; /// /// int4b_t x = ref; // load an int4b_t /// ref = x + 2_s4; // perform arithmetic on int4b_t and then store /// template < typename Element_, /// CUTLASS numeric element type. typename Storage_ = uint8_t, /// Underlying storage type. Must be able to hold an integer /// number of objects of type Element. class = void > class ConstSubbyteReference { public: using Element = Element_; using Storage = Storage_; using StoragePointer = Storage const *; static_assert(sizeof_bits<Element>::value <= sizeof_bits<Storage>::value, "Size of Element must not be greater than Storage."); static_assert(!(sizeof_bits<Storage>::value % sizeof_bits<Element>::value), "Storage must be divisible by Element"); private: ///! Number of elements per storage vector int const kElementsPerVector = sizeof_bits<Storage>::value / sizeof_bits<Element>::value; ///! 
Bit mask Storage const kMask = ((sizeof_bits<Element>::value < sizeof_bits<Storage>::value) ? (Storage(1) << sizeof_bits<Element>::value) - Storage(1) : ~Storage(0)); private: /// Pointer to array containing element StoragePointer ptr_; /// Offset (in units of elements) from pointer. /// /// Invariant: must always be in range [0, kElementsPerVector) int offset_; public: CUTLASS_HOST_DEVICE ConstSubbyteReference(): ptr_(nullptr), offset_(0) { } /// Constructor CUTLASS_HOST_DEVICE ConstSubbyteReference( Element const *ptr, /// pointer to memory int64_t offset /// logical offset in units of Element ): ptr_(reinterpret_cast<StoragePointer>(ptr)), offset_(0) { int64_t offset_in_vectors = offset / kElementsPerVector; int64_t offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = int(offset_in_elements); } /// Constructor CUTLASS_HOST_DEVICE ConstSubbyteReference( Element *ptr = nullptr ): ConstSubbyteReference(ptr, 0) { } /// Gets storage pointer CUTLASS_HOST_DEVICE StoragePointer storage_pointer() const { return ptr_; } /// Gets element offset within storage vector CUTLASS_HOST_DEVICE int element_offset() const { return offset_; } /// Unpacks an element from memory CUTLASS_HOST_DEVICE Element get() const { Storage item = Storage((*ptr_ >> (offset_ * sizeof_bits<Element>::value)) & kMask); return reinterpret_cast<Element const &>(item); } /// Unpacks an element from memory CUTLASS_HOST_DEVICE operator Element() const { return get(); } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator+=(int offset) { offset += offset_; int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = offset_in_elements; return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator+=(long long offset) { offset += offset_; long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ += offset_in_vectors; offset_ = offset_in_elements; return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator-=(int offset) { int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator-=(long long offset) { long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } return *this; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator+(int offset) const { ConstSubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator+(long long offset) const { ConstSubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator-(int offset) const { ConstSubbyteReference ref(ptr_, 
offset_); ref -= offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator-=(long long offset) const { ConstSubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Computes the difference in elements between references CUTLASS_HOST_DEVICE ptrdiff_t operator-(ConstSubbyteReference ref) const { return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to signed 64-bit integer CUTLASS_HOST_DEVICE explicit operator int64_t() const { return int64_t(get()); } /// Explicit cast to unsigned 64-bit integer CUTLASS_HOST_DEVICE explicit operator uint64_t() const { return uint64_t(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } /// Explicit cast to double CUTLASS_HOST_DEVICE explicit operator double() const { return double(get()); } }; template < typename Element_, /// CUTLASS numeric element type. typename Storage_ = /// Underlying storage type. Must be able to hold an integer /// number of objects of type Element. #if defined(__CUDA_ARCH__) /// Default size depends on width of atomicCas() overloads. #if (__CUDA_ARCH__ >= 700) /// uint16_t #else uint32_t #endif #else uint8_t #endif , class = void > class SubbyteReference { public: using Element = Element_; using Storage = Storage_; using StoragePointer = Storage *; static_assert(sizeof_bits<Element>::value <= sizeof_bits<Storage>::value, "Size of Element must not be greater than Storage."); static_assert(!(sizeof_bits<Storage>::value % sizeof_bits<Element>::value), "Storage must be divisible by Element"); private: ///! Number of elements per storage vector int const kElementsPerVector = sizeof_bits<Storage>::value / sizeof_bits<Element>::value; ///! Bit mask Storage const kMask = ((sizeof_bits<Element>::value < sizeof_bits<Storage>::value) ? (Storage(1) << sizeof_bits<Element>::value) - Storage(1) : ~Storage(0)); private: /// Pointer to array containing element StoragePointer ptr_; /// Offset (in units of elements) from pointer. 
/// /// Invariant: must always be in range [0, kElementsPerVector) int offset_; public: CUTLASS_HOST_DEVICE SubbyteReference(): ptr_(nullptr), offset_(0) { } /// Constructor CUTLASS_HOST_DEVICE SubbyteReference( Element *ptr, /// pointer to memory int64_t offset /// logical offset in units of Element ): ptr_(reinterpret_cast<StoragePointer>(ptr)), offset_(0) { int64_t offset_in_vectors = offset / kElementsPerVector; int64_t offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = int(offset_in_elements); } /// Constructor CUTLASS_HOST_DEVICE SubbyteReference( Element *ptr = nullptr ): SubbyteReference(ptr, 0) { } /// Gets storage pointer CUTLASS_HOST_DEVICE StoragePointer storage_pointer() const { return ptr_; } /// Gets storage pointer CUTLASS_HOST_DEVICE Element * operator&() const { return reinterpret_cast<Element *>(ptr_); } /// Gets element offset within storage vector CUTLASS_HOST_DEVICE int element_offset() const { return offset_; } /// Unpacks an element from memory CUTLASS_HOST_DEVICE Element get() const { uint8_t const* byte_ptr = reinterpret_cast<uint8_t const*>(ptr_); // Convert offset in elements to offset in bytes constexpr int elements_per_byte = cutlass::sizeof_bits<uint8_t>::value / cutlass::sizeof_bits<Element>::value; byte_ptr += offset_ / elements_per_byte; // Offset of element within a byte int byte_offset = offset_ % elements_per_byte; uint8_t item = uint8_t((*byte_ptr >> (byte_offset * cutlass::sizeof_bits<Element>::value)) & kMask); return reinterpret_cast<Element const &>(item); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference & set(Element const &x) { Storage item = (reinterpret_cast<Storage const &>(x) & kMask); Storage kUpdateMask = Storage(~(kMask << (offset_ * cutlass::sizeof_bits<Element>::value))); Storage new_bits = Storage(item << (offset_ * cutlass::sizeof_bits<Element>::value)); #if defined(__CUDA_ARCH__) // // Homebrew read-modify-write // Storage original; Storage updated; do { original = (*ptr_); updated = Storage((original & kUpdateMask) | new_bits); original = atomicCAS(ptr_, original, updated); } while (updated != original); #else Storage original = (*ptr_); Storage updated = Storage((original & kUpdateMask) | new_bits); *ptr_ = updated; #endif return *this; } //// /// Unpacks an element from memory CUTLASS_HOST_DEVICE operator Element() const { return get(); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=(Element const & x) { return set(x); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=(SubbyteReference const & x) { return set(x.get()); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=( ConstSubbyteReference<Element, Storage> const &x) { return set(x.get()); } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator+=(int offset) { offset += offset_; int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = offset_in_elements; return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator+=(long long offset) { offset += offset_; long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ += offset_in_vectors; offset_ = offset_in_elements; return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference 
&operator-=(int offset) { int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator-=(long long offset) { long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } return *this; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator+(int offset) const { SubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator+(long long offset) const { SubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator-(int offset) const { SubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator-=(long long offset) const { SubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Computes the difference in elements between references CUTLASS_HOST_DEVICE ptrdiff_t operator-(SubbyteReference ref) const { return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to signed 64-bit integer CUTLASS_HOST_DEVICE explicit operator int64_t() const { return int64_t(get()); } /// Explicit cast to unsigned 64-bit integer CUTLASS_HOST_DEVICE explicit operator uint64_t() const { return uint64_t(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } /// Explicit cast to double CUTLASS_HOST_DEVICE explicit operator double() const { return double(get()); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> using _war = T; template < typename Element_, /// CUTLASS numeric element type. typename Storage_ /// Underlying basic storage type. > class SubbyteReference<Element_, Storage_, typename platform::enable_if<sizeof_bits<Storage_>::value % sizeof_bits<Element_>::value != 0>::type> { public: using Element = Element_; ///! Note: Storage unit could not be divisibale by Element, /// Type element may be stored across 2 storage units, so need a storage vector to hold integer /// number of objects of type Element. 
using StorageUnit = Storage_; static int const kBitsStoredVec = cutlass::lcm_cxx11(sizeof_bits<Element>::value, sizeof_bits<StorageUnit>::value); static int const kNumStorageUnitPerStoredVec = kBitsStoredVec / sizeof_bits<StorageUnit>::value; using StorageVec = StorageUnit[kNumStorageUnitPerStoredVec]; using StorageVecPointer = StorageVec *; using CudaAtomicType = typename platform::conditional< sizeof_bits<StorageUnit>::value == 16, uint32_t, uint64_t >::type; static_assert(sizeof_bits<Element>::value <= sizeof_bits<StorageVec>::value, "Size of Element must not be greater than StorageVec."); static_assert(!(sizeof_bits<StorageVec>::value % sizeof_bits<Element>::value), "StorageVec must be divisible by Element"); private: ///! Number of elements per storage vector int const kElementsPerVector = sizeof_bits<StorageVec>::value / sizeof_bits<Element>::value; ///! Bit mask for storage unit. StorageUnit const kMask = (StorageUnit(1) << sizeof_bits<Element>::value) - StorageUnit(1); /// Pointer to array containing element _war<StorageVecPointer> ptr_; /// Offset (in units of elements) from pointer. /// /// Invariant: must always be in range [0, kElementsPerVector) int offset_; /// Element may be stored across 2 storage unit. /// Low storage unit index in StorageVec /// High storage unit index in StorageVec int low_storage_unit_idx_; int high_storage_unit_idx_; /// Full Mask to extract the entire element uint64_t full_element_mask_; /// Mask to extract the Element from Low storage unit and High storage unit. StorageUnit low_storage_mask_; StorageUnit high_storage_mask_; /// Start bit index inside the storage unit. int start_bit_idx_; private: CUTLASS_HOST_DEVICE void update_element_status() { int num_bits = offset_ * sizeof_bits<Element>::value; start_bit_idx_ = num_bits % sizeof_bits<StorageUnit>::value; low_storage_unit_idx_ = num_bits / sizeof_bits<StorageUnit>::value; high_storage_unit_idx_ = sizeof_bits<StorageUnit>::value - (start_bit_idx_) < sizeof_bits<Element>::value ? low_storage_unit_idx_ + 1 : low_storage_unit_idx_; full_element_mask_ = uint64_t(kMask) << start_bit_idx_; low_storage_mask_ = StorageUnit(full_element_mask_ & ~StorageUnit(0)); high_storage_mask_ = StorageUnit((full_element_mask_ >> sizeof_bits<StorageUnit>::value) & ~StorageUnit(0)); } public: CUTLASS_HOST_DEVICE SubbyteReference(): ptr_(nullptr), offset_(0) { } /// Constructor CUTLASS_HOST_DEVICE SubbyteReference( Element *ptr, /// pointer to memory int64_t offset /// logical offset in units of Element ): ptr_(reinterpret_cast<StorageVecPointer>(ptr)), offset_(0) { int64_t offset_in_vectors = offset / kElementsPerVector; int64_t offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = int(offset_in_elements); update_element_status(); } /// Constructor CUTLASS_HOST_DEVICE SubbyteReference( Element *ptr = nullptr ): SubbyteReference(ptr, 0) { } /// Gets StorageVec pointer CUTLASS_HOST_DEVICE StorageVecPointer storage_pointer() const { return ptr_; } /// Gets StorageVec pointer CUTLASS_HOST_DEVICE Element * operator&() const { return reinterpret_cast<Element *>(ptr_); } /// Gets element offset within StorageVec vector CUTLASS_HOST_DEVICE int element_offset() const { return offset_; } /// Unpacks an element from memory CUTLASS_HOST_DEVICE Element get() const { StorageUnit low_bits = (*ptr_)[low_storage_unit_idx_] & low_storage_mask_; StorageUnit high_bits = low_storage_unit_idx_ != high_storage_unit_idx_ ? 
(*ptr_)[high_storage_unit_idx_] & high_storage_mask_ : 0; uint64_t full_item = ((uint64_t)high_bits << sizeof_bits<StorageUnit>::value) | low_bits; uint8_t result = uint8_t(full_item >> start_bit_idx_); return reinterpret_cast<Element const &>(result); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference & set(Element const &x) { uint64_t item = static_cast<uint64_t>((reinterpret_cast<uint8_t const &>(x) & kMask)) << start_bit_idx_; StorageUnit low_new_bits = StorageUnit(item & ~StorageUnit(0)); StorageUnit high_new_bits = StorageUnit(item >> sizeof_bits<StorageUnit>::value); StorageUnit const kLowUpdateMask = StorageUnit((~full_element_mask_) & (~StorageUnit(0))); StorageUnit const kHighUpdateMask = StorageUnit(((~full_element_mask_) >> sizeof_bits<StorageUnit>::value) & (~StorageUnit(0))); #if defined(__CUDA_ARCH__) // // Homebrew read-modify-write // if(high_storage_unit_idx_ != low_storage_unit_idx_){ /// Only need update 2 storage unit at once. /// consider misaligned address issue, we need to do atomicCAS twice StorageUnit original_low_bits, original_high_bits, update_low_bits, update_high_bits; do { original_low_bits = ((*ptr_)[low_storage_unit_idx_]); update_low_bits = (original_low_bits & kLowUpdateMask) | low_new_bits; original_low_bits = atomicCAS(&((*ptr_)[low_storage_unit_idx_]), original_low_bits, update_low_bits); } while (update_low_bits != original_low_bits); do { original_high_bits = ((*ptr_)[high_storage_unit_idx_]); update_high_bits = (original_high_bits & kHighUpdateMask) | high_new_bits; original_high_bits = atomicCAS(&((*ptr_)[high_storage_unit_idx_]), original_high_bits, update_high_bits); } while (update_high_bits != original_high_bits); } else { /// Only need update 1 storage unit. StorageUnit original, updated; do { original = ((*ptr_)[low_storage_unit_idx_]); updated = (original & kLowUpdateMask) | low_new_bits; original = atomicCAS(&((*ptr_)[low_storage_unit_idx_]), original, updated); } while (updated != original); } #else StorageUnit update_low_bits = ((*ptr_)[low_storage_unit_idx_] & kLowUpdateMask) | low_new_bits; StorageUnit update_high_bits = ((*ptr_)[high_storage_unit_idx_] & kHighUpdateMask) | high_new_bits; (*ptr_)[low_storage_unit_idx_] = update_low_bits; if(low_storage_unit_idx_ != high_storage_unit_idx_) (*ptr_)[high_storage_unit_idx_] = update_high_bits; #endif return *this; } //// /// Unpacks an element from memory CUTLASS_HOST_DEVICE operator Element() const { return get(); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=(Element const & x) { return set(x); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=(SubbyteReference const & x) { return set(x.get()); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=( ConstSubbyteReference<Element, StorageVec> const &x) { return set(x.get()); } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator+=(int offset) { offset += offset_; int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = offset_in_elements; update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator+=(long long offset) { offset += offset_; long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ += offset_in_vectors; offset_ = offset_in_elements; 
update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator-=(int offset) { int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator-=(long long offset) { long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } update_element_status(); return *this; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator+(int offset) const { SubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator+(long long offset) const { SubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator-(int offset) const { SubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator-=(long long offset) const { SubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Computes the difference in elements between references CUTLASS_HOST_DEVICE ptrdiff_t operator-(SubbyteReference ref) const { return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to signed 64-bit integer CUTLASS_HOST_DEVICE explicit operator int64_t() const { return int64_t(get()); } /// Explicit cast to unsigned 64-bit integer CUTLASS_HOST_DEVICE explicit operator uint64_t() const { return uint64_t(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } /// Explicit cast to double CUTLASS_HOST_DEVICE explicit operator double() const { return double(get()); } }; template<typename T> using _war = T; template < typename Element_, /// CUTLASS numeric element type. typename Storage_ /// Underlying storage type. Must be able to hold an integer > class ConstSubbyteReference<Element_, Storage_, typename platform::enable_if<sizeof_bits<Storage_>::value % sizeof_bits<Element_>::value != 0>::type> { public: using Element = Element_; ///! Note: Storage unit could not be divisibale by Element, /// Type element may be stored across 2 storage units, so need a storage vector to hold integer /// number of objects of type Element. 
using StorageUnit = Storage_; static int const kBitsStoredVec = cutlass::lcm_cxx11(sizeof_bits<Element>::value, sizeof_bits<StorageUnit>::value); static int const kNumStorageUnitPerStoredVec = kBitsStoredVec / sizeof_bits<StorageUnit>::value; using StorageVec = StorageUnit[kNumStorageUnitPerStoredVec]; using StorageVecPointer = StorageVec const *; using CudaAtomicType = typename platform::conditional< sizeof_bits<StorageUnit>::value == 16, uint32_t, uint64_t >::type; static_assert(sizeof_bits<Element>::value <= sizeof_bits<StorageVec>::value, "Size of Element must not be greater than StorageVec."); static_assert(!(sizeof_bits<StorageVec>::value % sizeof_bits<Element>::value), "StorageVec must be divisible by Element"); private: ///! Number of elements per storage vector int const kElementsPerVector = sizeof_bits<StorageVec>::value / sizeof_bits<Element>::value; ///! Bit mask for storage unit. StorageUnit const kMask = (StorageUnit(1) << sizeof_bits<Element>::value) - StorageUnit(1); /// Pointer to array containing element _war<StorageVecPointer> ptr_; /// Offset (in units of elements) from pointer. /// /// Invariant: must always be in range [0, kElementsPerVector) int offset_; /// Element may be stored across 2 storage unit. /// Low storage unit index in StorageVec /// High storage unit index in StorageVec int low_storage_unit_idx_; int high_storage_unit_idx_; /// Full Mask to extract the entire element uint64_t full_element_mask_; /// Mask to extract the Element from Low storage unit and High storage unit. StorageUnit low_storage_mask_; StorageUnit high_storage_mask_; /// Start bit index inside the storage unit. int start_bit_idx_; private: CUTLASS_HOST_DEVICE void update_element_status() { int num_bits = offset_ * sizeof_bits<Element>::value; start_bit_idx_ = num_bits % sizeof_bits<StorageUnit>::value; low_storage_unit_idx_ = num_bits / sizeof_bits<StorageUnit>::value; high_storage_unit_idx_ = sizeof_bits<StorageUnit>::value - (start_bit_idx_) < sizeof_bits<Element>::value ? low_storage_unit_idx_ + 1 : low_storage_unit_idx_; full_element_mask_ = uint64_t(kMask) << start_bit_idx_; low_storage_mask_ = StorageUnit(full_element_mask_ & ~StorageUnit(0)); high_storage_mask_ = StorageUnit((full_element_mask_ >> sizeof_bits<StorageUnit>::value) & ~StorageUnit(0)); } public: CUTLASS_HOST_DEVICE ConstSubbyteReference(): ptr_(nullptr), offset_(0) { } /// Constructor CUTLASS_HOST_DEVICE ConstSubbyteReference( Element const *ptr, /// pointer to memory int64_t offset /// logical offset in units of Element ): ptr_(reinterpret_cast<StorageVecPointer>(ptr)), offset_(0) { int64_t offset_in_vectors = offset / kElementsPerVector; int64_t offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = int(offset_in_elements); update_element_status(); } /// Constructor CUTLASS_HOST_DEVICE ConstSubbyteReference( Element *ptr = nullptr ): ConstSubbyteReference(ptr, 0) { } /// Gets storage pointer CUTLASS_HOST_DEVICE StorageVecPointer storage_pointer() const { return ptr_; } /// Gets element offset within storage vector CUTLASS_HOST_DEVICE int element_offset() const { return offset_; } /// Unpacks an element from memory CUTLASS_HOST_DEVICE Element get() const { StorageUnit low_bits = (*ptr_)[low_storage_unit_idx_] & low_storage_mask_; StorageUnit high_bits = low_storage_unit_idx_ != high_storage_unit_idx_ ? 
(*ptr_)[high_storage_unit_idx_] & high_storage_mask_ : 0; uint64_t full_item = ((uint64_t)high_bits << sizeof_bits<StorageUnit>::value) | low_bits; uint8_t result = uint8_t(full_item >> start_bit_idx_); return reinterpret_cast<Element const &>(result); } /// Unpacks an element from memory CUTLASS_HOST_DEVICE operator Element() const { return get(); } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator+=(int offset) { offset += offset_; int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = offset_in_elements; update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator+=(long long offset) { offset += offset_; long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ += offset_in_vectors; offset_ = offset_in_elements; update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator-=(int offset) { int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator-=(long long offset) { long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } update_element_status(); return *this; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator+(int offset) const { ConstSubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator+(long long offset) const { ConstSubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator-(int offset) const { ConstSubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator-=(long long offset) const { ConstSubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Computes the difference in elements between references CUTLASS_HOST_DEVICE ptrdiff_t operator-(ConstSubbyteReference ref) const { return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to signed 64-bit integer CUTLASS_HOST_DEVICE explicit operator int64_t() const { return int64_t(get()); } /// Explicit cast to unsigned 64-bit integer CUTLASS_HOST_DEVICE explicit operator uint64_t() const { return uint64_t(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } /// Explicit cast to double CUTLASS_HOST_DEVICE explicit operator double() const { return 
double(get()); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, bool subbyte = (sizeof_bits<Element>::value < 8)> struct ReferenceFactory; template <typename Element> struct ReferenceFactory<Element, false> { ///! Number of elements per storage vector static int const kElementsPerVector = 1; CUTLASS_HOST_DEVICE static Element &get(Element *ptr, int64_t offset) { return ptr[offset]; } CUTLASS_HOST_DEVICE static Element const &get(Element const *ptr, int64_t offset) { return ptr[offset]; } CUTLASS_HOST_DEVICE static Element *add_pointer_offset(Element *ptr, int64_t offset) { return ptr + offset; } CUTLASS_HOST_DEVICE static Element const *add_pointer_offset(Element const *ptr, int64_t offset) { return ptr + offset; } }; template <typename Element> struct ReferenceFactory<Element, true> { // // Static methods // CUTLASS_HOST_DEVICE static SubbyteReference<Element> get(Element *ptr, int64_t offset) { return SubbyteReference<Element>(ptr, offset); } CUTLASS_HOST_DEVICE static ConstSubbyteReference<Element> get(Element const *ptr, int64_t offset) { return ConstSubbyteReference<Element>(ptr, offset); } /// Helper to add an offset in number of elements, assuming this offset is divisible /// by the vector size. CUTLASS_HOST_DEVICE static Element *add_pointer_offset(Element *ptr, int64_t offset_in_elements) { return ptr + offset_in_elements * sizeof_bits<Element>::value / sizeof(Element) / 8; } /// Helper to add an offset in number of elements, assuming this offset is divisible /// by the vector size. CUTLASS_HOST_DEVICE static Element const *add_pointer_offset(Element const *ptr, int64_t offset_in_elements) { return ptr + offset_in_elements * sizeof_bits<Element>::value / sizeof(Element) / 8; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
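/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only, not part of this header): the buffer, offset, values, and
// the example function/namespace names below are arbitrary assumptions chosen to show how a
// sub-byte element is packed and unpacked through the reference types defined above. It relies
// only on declarations already visible in this file. ReferenceFactory dispatches to these
// reference types automatically for sub-byte elements and to plain C++ references otherwise.

namespace example_usage {  // hypothetical namespace, for illustration only

inline int subbyte_reference_usage_example() {
  // Eight bytes of packed storage hold sixteen 4-bit elements; over-align the buffer so any
  // storage-unit type chosen by SubbyteReference can be loaded safely.
  alignas(8) uint8_t packed[8] = {0};
  using Element = cutlass::int4b_t;

  // Reference the element at logical offset 5. The reference computes which storage unit(s)
  // hold the 4-bit value and masks accordingly on get()/set().
  cutlass::SubbyteReference<Element> ref(reinterpret_cast<Element *>(packed), 5);

  ref = Element(3);   // read-modify-write updates only the addressed 4-bit slot
  Element x = ref;    // unpacks the value back out of the packed storage

  return (int(x) == 3) ? 0 : 1;
}

} // namespace example_usage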
cutlass/include/cutlass/subbyte_reference.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates calculating the address and predicates to the load of scale and bias vectors. This iterator uses masks to guard out-of-bounds accesses. It can be used to load the gamma and beta vectors of layernorm which is loop variant. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. */ #pragma once #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/conv/threadblock/conv2d_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedScaleBiasVectorAccessIterator /// template <typename ThreadblockShape, typename Element, typename Layout> class PredicatedScaleBiasVectorAccessIterator; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for fprop pitch-linear data. 
/// template <typename ThreadblockShape_, typename Element_> class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_, Element_, layout::PitchLinear> { public: using ThreadblockShape = ThreadblockShape_; using Element = Element_; using Layout = layout::PitchLinear; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ConstPointer = const Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value; static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess; using AccessType = AlignedArray<Element, kElementsPerAccess>; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; private: // // Data members // /// Internal pointer to first access of tile BytePointer pointer_; TensorCoord thread_offset_; int problem_size_k_; /// Used for out-of-order visitation bool is_residue_tile_; bool guard_; TensorCoord::Index residue_size_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( /// Extent of tensor int problem_size_k, /// Pointer to the start of the scale vector ConstPointer scale_pointer, /// Pointer to the start of the bias vector ConstPointer bias_pointer, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) { pointer_ = (thread_id < kThreads) ? reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(scale_pointer)) : reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(bias_pointer)); // Per-thread offset in logical coordinates of tensor int thread_base = (thread_id < kThreads) ? 0 : kThreads; problem_size_k_ = problem_size_k; is_residue_tile_ = true; residue_size_ = (problem_size_k_ - threadblock_offset.contiguous()) % ThreadblockShape::kContiguous; if (residue_size_ == 0) { residue_size_ = ThreadblockShape::kContiguous; } guard_ = ((thread_id - thread_base) * kElementsPerAccess) < residue_size_; thread_offset_ = threadblock_offset + TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0); set_iteration_index(0); } /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( /// Extent of tensor int problem_size_k, /// Pointer to start of scale vector ConstPointer scale_pointer, /// Pointer to start of scale vector ConstPointer bias_pointer, ///< ID of each participating thread int thread_id) : PredicatedScaleBiasVectorAccessIterator(problem_size_k, scale_pointer, bias_pointer, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) {} /// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles CUTLASS_DEVICE void add_tile_offset( TensorCoord const &tile_offset) { guard_ = threadIdx.x < kThreads * 2; TensorCoord offset = is_residue_tile_ ? 
TensorCoord(residue_size_ + ThreadblockShape::kContiguous * (tile_offset.contiguous() - 1), 0) : TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0); thread_offset_ = thread_offset_ + offset; is_residue_tile_ = false; } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>( pointer_ + (thread_offset_.contiguous() * sizeof_bits<Element>::value / 8)); } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator &operator++() { return *this; } /// Increment and return an instance to self. CUTLASS_DEVICE PredicatedScaleBiasVectorAccessIterator operator++(int) { PredicatedScaleBiasVectorAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { guard_ &= (!enable); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return guard_; } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for row-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename ThreadblockShape_, typename Element_> class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_, Element_, layout::RowMajor> { public: using ThreadblockShape = ThreadblockShape_; using Element = Element_; using Layout = layout::RowMajor; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ConstPointer = const Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator< layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>, Element, layout::PitchLinear>; using AccessType = typename UnderlyingIterator::AccessType; static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( ///< Extent of tensor int problem_size_k, ///< Pointer to the start of the scale vector ConstPointer scale_pointer, ///< Pointer to the start of the bias vector ConstPointer bias_pointer, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(problem_size_k, scale_pointer, bias_pointer, thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator( int problem_size_k, ///< Extent of tensor ConstPointer scale_pointer, ///< Pointer to the start of the scale vector ConstPointer bias_pointer, ///< Pointer to the start of the bias vector int thread_id ///< ID of each participating thread ) : PredicatedScaleBiasVectorAccessIterator(problem_size_k, scale_pointer, bias_pointer, thread_id, make_Coord(0, 0)) {} /// Advances an iterator along logical dimensions of matrix in units of whole /// threadblock tiles 
CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedScaleBiasVectorAccessIterator operator++(int) { PredicatedScaleBiasVectorAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
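/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only, not part of this header): the tile shape, element type, and
// the function/namespace names below are assumptions chosen to show the intended access pattern
// for this iterator -- construct it from the scale/bias pointers, issue a guarded vectorized
// access, then advance to the next K tile -- as a mainloop that fuses scale/bias vectors would
// do once per threadblock tile. It relies only on declarations already visible in this file.

namespace example_usage {  // hypothetical namespace, for illustration only

using ExampleThreadblockShape = cutlass::layout::PitchLinearShape<128, 8>;  // assumed tile shape
using ExampleElement          = float;                                      // assumed element type

using ExampleIterator = cutlass::transform::threadblock::PredicatedScaleBiasVectorAccessIterator<
    ExampleThreadblockShape, ExampleElement, cutlass::layout::PitchLinear>;

// Loads one guarded, vectorized fragment of the scale (or bias) vector and advances the
// iterator to the next K tile, mirroring a single mainloop iteration.
CUTLASS_DEVICE
void load_scale_bias_fragment(
    int problem_size_k,
    ExampleElement const *scale,
    ExampleElement const *bias,
    int thread_id,
    typename ExampleIterator::AccessType &fragment) {

  ExampleIterator iterator(problem_size_k, scale, bias, thread_id);

  if (iterator.valid()) {
    fragment = *iterator.get();   // predicated 128-bit access
  }

  iterator.add_tile_offset(cutlass::layout::PitchLinearCoord(1, 0));
}

} // namespace example_usage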
cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h
# CuTe Layout Algebra CuTe provides an "algebra of `Layout`s" to support combining layouts in different ways. This algebra includes operations such as * `Layout` functional composition, * a notion of `Layout` "product" to reproduce one layout according to another, and * a notion of `Layout` "divide" to split one layout according to another. Common utilities for building complicated layouts from simpler ones depend on the `Layout` product. Common utilities for partitioning layouts (of data, for example) across other layouts (of threads, for example) depend on the `Layout` divide. All of these utilities rely on the functional composition of `Layout`s. In this section, we'll build up the tools of the `Layout` algebra and explain some of these core operations in detail. ## Coalesce In the previous section, we summarized `Layout`s with > Layouts are functions from integers to integers. The `coalesce` operation is a "simplify" on functions from integers to integers. If we only care about input integers, then we can manipulate the shape and number of modes of the `Layout` without changing it as a function. The only thing `coalesce` can't change is the `Layout`'s `size`. More specifically, you can find the checked post-conditions in [the `coalesce` unit test](../../../test/unit/cute/core/coalesce.cpp), which we'll reproduce here: ```cpp // @post size(@a result) == size(@a layout) // @post depth(@a result) <= 1 // @post for all i, 0 <= i < size(@a layout), @a result(i) == @a layout(i) Layout coalesce(Layout const& layout) ``` For example, ```cpp auto layout = Layout<Shape <_2,Shape <_1,_6>>, Stride<_1,Stride<_6,_2>>>{}; auto result = coalesce(layout); // _12:_1 ``` where we can see the result has fewer modes and is "simpler." Indeed, this could save us a few operations in the coordinate mapping and index mapping (if those are performed dynamically). So, how do we get there? * We've already seen that column-major `Layout`s like `(_2,_4):(_1,_2)` act identically to `_8:_1` for 1-D coordinates. * Modes with size static-1 will always produce a natural coordinate of static-0. They can be ignored no matter the stride. Generalizing, consider a layout with just two integral modes, s0:d0 and s1:d1. Denote the result of coalescing this layout as s0:d0 ++ s1:d1. Then, there are four cases: 1. `s0:d0 ++ _1:d1 => s0:d0`. Ignore modes with size static-1. 2. `_1:d0 ++ s1:d1 => s1:d1`. Ignore modes with size static-1. 3. `s0:d0 ++ s1:s0*d0 => s0*s1:d0`. If the second mode's stride is the product of the first mode's size and stride, then they can be combined. 4. `s0:d0 ++ s1:d1 => (s0,s1):(d0,d1)`. Else, nothing can be done and they must be treated separately. That's it! We can flatten any layout and apply the above binary operation to each pair of adjacent modes in order to "coalesce" the modes of the layout. ### By-mode Coalesce Obviously, sometimes we do care about the shape of our `Layout`, but would still like to coalesce. For example, I have a 2-D `Layout` and I would like the result to remain 2-D. 
For this reason, there's an overload of `coalesce` that takes an additional parameter ```cpp // Apply coalesce at the terminals of trg_profile Layout coalesce(Layout const& layout, IntTuple const& trg_profile) ``` which can be used as follows ```cpp auto a = Layout<Shape <_2,Shape <_1,_6>>, Stride<_1,Stride<_6,_2>>>{}; auto result = coalesce(a, Step<_1,_1>{}); // (_2,_6):(_1,_2) // Identical to auto same_r = make_layout(coalesce(layout<0>(a)), coalesce(layout<1>(a))); ``` This function is recursing into `Step<_1,_1>{}` and applying `coalesce` to the corresponding sublayout whenever it sees an integer (the values don't matter, they're just flags) rather than a tuple. > This theme of defining an operation that treats a `Layout` as a "1-D" function from integers to integers and then generalizing to use it for an arbitrarily shaped layout will be a common one! ## Composition Functional composition of `Layout`s is the core of CuTe and is used in just about every higher-level operation. Starting again from the observation that `Layout`s are just functions from integers to integers, we can define functional composition that results in another `Layout`. First, an example. ```text Functional composition, R := A o B R(c) := (A o B)(c) := A(B(c)) Example A = (6,2):(8,2) B = (4,3):(3,1) R( 0) = A(B( 0)) = A(B(0,0)) = A( 0) = A(0,0) = 0 R( 1) = A(B( 1)) = A(B(1,0)) = A( 3) = A(3,0) = 24 R( 2) = A(B( 2)) = A(B(2,0)) = A( 6) = A(0,1) = 2 R( 3) = A(B( 3)) = A(B(3,0)) = A( 9) = A(3,1) = 26 R( 4) = A(B( 4)) = A(B(0,1)) = A( 1) = A(1,0) = 8 R( 5) = A(B( 5)) = A(B(1,1)) = A( 4) = A(4,0) = 32 R( 6) = A(B( 6)) = A(B(2,1)) = A( 7) = A(1,1) = 10 R( 7) = A(B( 7)) = A(B(3,1)) = A(10) = A(4,1) = 34 R( 8) = A(B( 8)) = A(B(0,2)) = A( 2) = A(2,0) = 16 R( 9) = A(B( 9)) = A(B(1,2)) = A( 5) = A(5,0) = 40 R(10) = A(B(10)) = A(B(2,2)) = A( 8) = A(2,1) = 18 R(11) = A(B(11)) = A(B(3,2)) = A(11) = A(5,1) = 42 ``` The absolutely amazing observation is that the function `R(c) = k` defined above can be written down as another `Layout` ``` R = ((2,2),3):((24,2),8) ``` AND ``` compatible(B, R) ``` That is, every coordinate of `B` can also be used as a coordinate of `R`. This is an expected property of functional composition because `B` defines the *domain* of `R`. You can find many examples and checked post-conditions in [the `composition` unit test](../../../test/unit/cute/core/composition.cpp). The post-conditions are precisely as we just stated. ```cpp // @post compatible(@a layout_b, @a result) // @post for all i, 0 <= i < size(@a layout_b), @a result(i) == @a layout_a(@a layout_b(i))) Layout composition(LayoutA const& layout_a, LayoutB const& layout_b) ``` ### Computing Composition First, a few observations: * `B = (B_0, B_1, ...)`. A layout can be expressed as the concatenation of its sublayouts. * `A o B = A o (B_0, B_1, ...) = (A o B_0, A o B_1, ...)`. When `B` is injective, composition is left-distributive with concatenation. With the above, we can assume without loss of generality that `B = s:d` is a layout with integral shape and stride. We can also assume that `A` is a flattened, coalesced layout. When `A` is integral, `A = a:b`, the result is rather trivial: `R = A o B = a:b o s:d = s:(b*d)`. But when `A` is multimodal, we need to be more careful. Put into words, `A o B = A o s:d`, for integral `s` and `d` means that we want (1) every `d`th element of `A`, and then (2) keep the first `s` of those strided elements. 1. 
Every `d`th element of `A` can be computed by "dividing out" the first `d` elements from the shape of `A`. For an array of integers representing the shape, this is computed as ```cpp void shape_div(int* shapeA, int N, int& strideB) { for (int i = 0; i < N; ++i) { assert(shapeA[i] % strideB == 0 or strideB % shapeA[i] == 0); int new_shape = ceil_div(shapeA[i], strideB); int new_stride = ceil_div(strideB, shapeA[i]); shapeA[i] = new_shape; strideB = new_stride; } } ``` which progressively "removes" the first `strideB` elements from `shapeA` starting from the left. For example, * `(6,2) / 2 => (3,2)` * `(6,2) / 3 => (2,2)` * `(6,2) / 6 => (1,2)` * `(6,2) / 12 => (1,1)` * `(3,6,2,8) / 6 => (1,3,2,8)` * `(3,6,2,8) / 9 => (1,2,2,8)` * `(42,16,3) / 2 => (21,16,3)` * `(42,16,3) / 6 => ( 7,16,3)` As you may have noticed, we can only divide shapes by certain values and get a sensible result. This is called the **divisibility condition** and is enforced by the `assert` in the above code and statically checked in CuTe when possible. 2. The first `s` elements of the strided `A` layout can be computed by "modding out" the first `s` elements from the shape of `A`. For an array of integers representing the shape, this is computed as ```cpp void shape_mod(int* shapeA, int N, int& shapeB) { for (int i = 0; i < N; ++i) { assert(shapeA[i] % shapeB == 0 or shapeB % shapeA[i] == 0); int new_shapeA = min(shapeA[i], shapeB); int new_shapeB = ceil_div(shapeB, shapeA[i]); shapeA[i] = new_shapeA; shapeB = new_shapeB; } } ``` which progressibly "keeps" the first `shapeB` elements from `shapeA` starting from the left. For example, * `(6,2) % 2 => (2,1)` * `(6,2) % 3 => (3,1)` * `(6,2) % 6 => (6,1)` * `(6,2) % 12 => (6,2)` * `(3,6,2,8) % 6 => (3,2,1,1)` * `(3,6,2,8) % 9 => (3,3,1,1)` * `(1,2,2,8) % 2 => (1,2,1,1)` * `(1,2,2,8) % 16 => (1,2,2,4)` Again, this operation must satisfy the divisibility condition to yield a sensible result. This is enforced by the `assert` in the above code and statically checked in CuTe when possible. Clearly, CuTe does not use arrays to store shapes or strides and the above code is for explication only. CuTe works with shapes and strides as `IntTuple`s and the implementation is expressed as algorithmic `fold`s which carefully account for static and dynamic integers. #### Example 1 -- Reshape a layout into a matrix `20:2 o (5,4):(4,1)`. Composition formulation. This describes interpreting the layout `20:2` as a 5x4 matrix in a row-major order. 1. ` = 20:2 o (5:4,4:1)`. Layout `(5,4):(4,1)` as concatenation of sublayouts. 2. ` = (20:2 o 5:4, 20:2 o 4:1)`. Left distributivity. * `20:2 o 5:4 => 5:8`. Trivial case. * `20:2 o 4:1 => 4:2`. Trivial case. 3. ` = (5:8, 4:2)`. Composed Layout as concatenation of sublayouts. 4. ` = (5,4):(8,2)`. Final composed layout. #### Example 2 -- Reshape a layout into a matrix `(10,2):(16,4) o (5,4):(1,5)` This describes interpreting the layout `(10,2):(16,4)` as a 5x4 matrix in a column-major order. 1. ` = (10,2):(16,4) o (5:1,4:5)`. Layout `(5,4):(1,5)` as concatenation of sublayouts. 2. ` = ((10,2):(16,4) o 5:1, (10,2):(16,4) o 4:5)`. Left distributivity. * `(10,2):(16,4) o 5:1 => (5,1):(16,4)`. Mod out the shape `5`. * `(10,2):(16,4) o 4:5 => (2,2):(80,4)`. Div out the stride `5`. 3. ` = ((5,1):(16,4), (2,2):(80,4))`. Composed Layout as concatenation of sublayouts. 4. ` = (5:16, (2,2):(80,4))`. By-mode coalesce. 5. ` = (5,(2,2))):(16,(80,4))`. Final composed layout. We get exactly this result with CuTe if we use compile-time shapes and strides. 
The following C++ code prints `(_5,(_2,_2)):(_16,(_80,_4))`. ```cpp Layout a = make_layout(make_shape (Int<10>{}, Int<2>{}), make_stride(Int<16>{}, Int<4>{})); Layout b = make_layout(make_shape (Int< 5>{}, Int<4>{}), make_stride(Int< 1>{}, Int<5>{})); Layout c = composition(a, b); print(c); ``` If we use dynamic integers, the following C++ code prints `((5,1),(2,2)):((16,4),(80,4))`. ```cpp Layout a = make_layout(make_shape (10, 2), make_stride(16, 4)); Layout b = make_layout(make_shape ( 5, 4), make_stride( 1, 5)); Layout c = composition(a, b); print(c); ``` The results may _look_ different but are the mathematically the same. The 1s in the shape don't affect the layout as a mathematical function from 1-D coordinates to integers or as a function from 2-D coordinates to integers. In the dynamic case, CuTe can not coalesce the dynamic size-1 modes to "simplify" the layout due to the static rank and type of the tuples containing them. ### By-mode Composition Similar to by-mode `coalesce` and building up to a generic tiling operation, sometimes we do care about the shape of the `A` layout and would still like to apply `composition` to individual modes. For example, I have a 2-D `Layout` and would like some sublayout of the elements down the columns and another sublayout of elements across the rows. For this reason, `composition` also works when its second parameter -- the `B` -- is a `Tiler`. In general, a tiler is a layout or a tuple-of-layouts (note the generalization on `IntTuple`), which can be used as follows ```cpp // (12,(4,8)):(59,(13,1)) auto a = make_layout(make_shape (12,make_shape ( 4,8)), make_stride(59,make_stride(13,1))); // <3:4, 8:2> auto tiler = make_tile(Layout<_3,_4>{}, // Apply 3:4 to mode-0 Layout<_8,_2>{}); // Apply 8:2 to mode-1 // (_3,(2,4)):(236,(26,1)) auto result = composition(a, tiler); // Identical to auto same_r = make_layout(composition(layout<0>(a), get<0>(tiler)), composition(layout<1>(a), get<1>(tiler))); ``` We often use the `<LayoutA, LayoutB, ...>` notation to distinguish `Tiler`s from the concatenation-of-sublayouts notation `(LayoutA, LayoutB, ...)` that we used previously. The `result` in the above code can be depicted as the 3x8 sublayout of the original layout highlighted in the figure below. <p align="center"> <img src="../../images/cute/composition1.png" alt="composition1.png" height="250"/> </p> For convenience, CuTe also interprets `Shape`s as a tiler as well. A `Shape` is interpreted as tuple-of-layouts-with-stride-1: ```cpp // (12,(4,8)):(59,(13,1)) auto a = make_layout(make_shape (12,make_shape ( 4,8)), make_stride(59,make_stride(13,1))); // (8, 3) auto tiler = make_shape(Int<3>{}, Int<8>{}); // Equivalent to <3:1, 8:1> // auto tiler = make_tile(Layout<_3,_1>{}, // Apply 3:1 to mode-0 // Layout<_8,_1>{}); // Apply 8:1 to mode-1 // (_3,(4,2)):(59,(13,1)) auto result = composition(a, tiler); ``` where `result` can be depicted as the 3x8 sublayout of the original layout highlighted in the figure below. <p align="center"> <img src="../../images/cute/composition2.png" alt="composition2.png" height="250"/> </p> ## Composition Tilers In summary, a `Tiler` is one of the following objects. 1. A `Layout`. 2. A tuple of `Tiler`s. 3. A `Shape`, which will be interpreted as a tiler of `Layout`s with stride-1. Any of the above can be used as the second argument in `composition`. With (1), we think of the `composition` as between two functions from integers to integers, no matter the ranks of the layouts. 
With (2) and (3), the `composition` is performed on each pair of corresponding modes of `A` and `B`, until case (1) is found. This allows composition to be applied by-mode to retrieve arbitrary sublayouts of specified modes of a tensor ("Give me the 3x5x8 subblock of this MxNxL tensor") but also allows entire tiles of data to be reshaped and reordered as if they were 1-D vectors ("Reorder this 8x16 block of data into a 32x4 block using this weird order of elements"). We will see the by-mode cases appear often when we are tiling for threadblocks in examples that follow. We will see 1-D reshaping and reordering when we want to apply arbitrary partitioning patterns for threads and values in MMAs in examples that follow. ## Complement Before getting to "product" and "divide," we need one more operation. We can think of `composition` as a layout `B` that is "selecting" certain coordinates from another layout `A`. But what about the coordinates that aren't "selected"? To implement generic tiling, we want to be able to select arbitrary elements -- the tile -- and to describe the layout of those tiles -- the leftovers, or the "rest." The `complement` of a layout attempts to find another layout that represents the "rest" -- the elements that aren't touched by the layout. You can find many examples and checked post-conditions in [the `complement` unit test](../../../test/unit/cute/core/complement.cpp). The post-conditions include ```cpp // @post cosize(make_layout(@a layout_a, @a result))) >= size(@a cotarget) // @post cosize(@a result) >= round_up(size(@a cotarget), cosize(@a layout_a)) // @post for all i, 1 <= i < size(@a result), // @a result(i-1) < @a result(i) // @post for all i, 1 <= i < size(@a result), // for all j, 0 <= j < size(@a layout_a), // @a result(i) != @a layout_a(j) Layout complement(LayoutA const& layout_a, Shape const& cotarget) ``` That is, the complement `R` of a layout `A` with respect to a Shape (IntTuple) `M` satisfies the following properties. 1. The size (and cosize) of `R` is *bounded* by `size(M)`. 2. `R` is *ordered*. That is, the strides of `R` are positive and increasing. This means that `R` is unique. 3. `A` and `R` have *disjoint* codomains. `R` attempts to "complete" the codomain of `A`. The `cotarget` parameter above is most commonly an integer -- you can see we only use `size(cotarget)` above. However, sometimes it is useful to specify an integer that has static properties. For example, `28` is a dynamic integer and `(_4,7)` is a shape with size `28` that is statically known to be divisible by `_4`. Both will produce the same `complement` mathematically, but the extra information can used by `complement` to preserve the staticness of the result as much as possible. ### Complement Examples `complement` is most effective on static shapes and strides, so consider all integers below to be static. Similar examples for dynamic shapes and strides as well as IntTuple `cotarget` can be found in [the unit test](../../../test/unit/cute/core/complement.cpp). * `complement(4:1, 24)` is `6:4`. Note that `(4,6):(1,4)` has cosize `24`. The layout `4:1` is effectively repeated 6 times with `6:4`. * `complement(6:4, 24)` is `4:1`. Note that `(6,4):(4,1)` has cosize `24`. The "hole" in `6:4` is filled with `4:1`. * `complement((4,6):(1,4), 24)` is `1:0`. Nothing needs to be appended. * `complement(4:2, 24)` is `(2,3):(1,8)`. Note that `(4,(2,3)):(2,(1,8))` has cosize `24`. The "hole" in `4:2` is filled with `2:1` first, then everything is repeated 3 times with `3:8`. 
* `complement((2,4):(1,6), 24)` is `3:2`. Note that `((2,4),3):((1,6),2)` has cosize `24` and produces unique indices. * `complement((2,2):(1,6), 24)` is `(3,2):(2,12)`. Note that `((2,2),(3,2)):((1,6),(2,12))` has cosize `24` and produces unique indices. <p align="center"> <img src="../../images/cute/complement1.png" alt="complement1.png" height="75"/> </p> As a visualization, the above figure depicts the codomain of the last example. The image of the original layout `(2,2):(1,6)` is colored in gray. The complement effectively "repeats" the original layout (displayed in the other colors) such that the codomain size of the result is `24`. The complement `(3,2):(2,12)` can be viewed as the "layout of the repetition." ## Division (Tiling) Finally, we can define the division of a `Layout` by another `Layout`. Functions that divide a layout into components are useful as a basis for tiling and partitioning layouts. In this section, we'll define `logical_divide(Layout, Layout)`, which again considers all `Layout`s as 1-D functions from integers to integers, and then use that definition to create multidimensional `Layout` divides. Informally, `logical_divide(A, B)` splits a layout `A` into two modes -- in the first mode are all elements pointed to by `B` and in the second mode are all elements not pointed to by `B`. Formally, this can be written as $A \oslash B := A \circ (B,B^*)$ and implemented as ```cpp template <class LShape, class LStride, class TShape, class TStride> auto logical_divide(Layout<LShape,LStride> const& layout, Layout<TShape,TStride> const& tiler) { return composition(layout, make_layout(tiler, complement(tiler, size(layout)))); } ``` Note that this is defined only in terms of concatenation, composition, and complement. So what is that? > in the first mode are all elements pointed to by `B` This is clearly composition, `A o B`. > in the second mode are all elements not pointed to by `B` The elements NOT pointed to by `B` sounds like a complement, `B*`, up to the size of `A`. As we've seen above in the `complement` section, this can be described as the "layout of the repetition of `B`." If `B` is the "tiler", then `B*` is the layout of the tiles. ### Logical Divide 1-D Example Consider tiling the 1-D layout `A = (4,2,3):(2,1,8)` with the tiler `B = 4:2`. Informally, this means that we have a 1-D vector of 24 elements in some storage order defined by `A` and we want to extract tiles of 4 elements strided by 2. This is computed in the three steps described in the implementation above. * Complement of `B = 4:2` under `size(A) = 24` is `B* = (2,3):(1,8)`. * Concantenation of `(B,B*) = (4,(2,3)):(2,(1,8))`. * Composition of `A = (4,2,3):(2,1,8)` with `(B,B*)` is then `((2,2),(2,3)):((4,1),(2,8))`. <p align="center"> <img src="../../images/cute/divide1.png" alt="divide1.png" height="150"/> </p> The above figure depicts `A` as a 1-D layout with the elements pointed to by `B` highlighted in gray. The layout `B` describes our "tile" of data, and there are six of those tiles in `A` shown by each of the colors. After the divide, the first mode of the result is the tile of data and the second mode of the result iterates over each tile. ### Logical Divide 2-D Example Using the `Tiler` concept defined above, this immediately generalizes to multidimensional tiling. The below example simply applies `layout_divide` by-mode to the cols and rows of a 2-D layout using a `Tiler`. 
Similar to the 2-D composition example above, consider a 2-D layout `A = (9,(4,8)):(59,(13,1))` and want to apply `3:3` down the columns (mode-0) and `(2,4):(1,8)` across the rows (mode-1). This means the tiler can be written as `B = <3:3, (2,4):(1,8)>`. <p align="center"> <img src="../../images/cute/divide2.png" alt="divide2.png" height="450"/> </p> The above figure depicts `A` as a 2-D layout with the elements pointed to by `B` highlighted in gray. The layout `B` describes our "tile" of data, and there are twelve of those tiles in `A` shown by each of the colors. After the divide, the first mode of each mode of the result is the tile of data and the second mode of each mode iterates over each tile. In that sense, this operation can be viewed as a kind of `gather` operation or as simply a permutation on the rows and cols. Note that the first mode of each mode of the result is the sublayout `(3,(2,4)):(177,(13,2))` and is precisely the result we would have received if we had applied `composition` instead of `logical_divide`. ### Zipped, Tiled, Flat Divides It's easy to see the tiles when they are highlighted in the images above, but working with them can still be awkward. How would you slice out the `3`rd tile or the `7`th tile or the `(1,2)`th tile so you could continue working on it? Enter the convenience flavors of `logical_divide`. Suppose we have a `Layout` and a `Tiler` of some shape, then each operation will apply `logical_divide`, but potentially rearrange the modes into more convenient forms. ```text Layout Shape : (M, N, L, ...) Tiler Shape : <TileM, TileN> logical_divide : ((TileM,RestM), (TileN,RestN), L, ...) zipped_divide : ((TileM,TileN), (RestM,RestN,L,...)) tiled_divide : ((TileM,TileN), RestM, RestN, L, ...) flat_divide : (TileM, TileN, RestM, RestN, L, ...) ``` For example, the `zipped_divide` function applies `logical_divide`, and then gathers the "subtiles" into a single mode and the "rest" into a single mode. ```cpp // A: shape is (9,32) auto layout_a = make_layout(make_shape (Int< 9>{}, make_shape (Int< 4>{}, Int<8>{})), make_stride(Int<59>{}, make_stride(Int<13>{}, Int<1>{}))); // B: shape is (3,8) auto tiler = make_tile(Layout<_3,_3>{}, // Apply 3:3 to mode-0 Layout<Shape <_2,_4>, // Apply (2,4):(1,8) to mode-1 Stride<_1,_8>>{}); // ((TileM,RestM), (TileN,RestN)) with shape ((3,3), (8,4)) auto ld = logical_divide(layout_a, tiler); // ((TileM,TileN), (RestM,RestN)) with shape ((3,8), (3,4)) auto zd = zipped_divide(layout_a, tiler); ``` Then, the offset to the `3`rd tile is `zd(0,3)`. The offset to the `7`th tile is `zd(0,7)`. The offset to the `(1,2)`th tile is `zd(0,make_coord(1,2))`. The tile itself always has layout `layout<0>(zd)`. Indeed, it is always the case that `layout<0>(zipped_divide(a, b)) == composition(a, b)`. We note that `logical_divide` preserves the *semantics* of the modes while permuting the elements within those modes -- the `M`-mode of layout `A` is still the `M`-mode of the result and the `N`-mode of layout `A` is still the `N`-mode of the result. This is not the case with `zipped_divide`. The mode-0 in the `zipped_divide` result is the `Tile` itself (of whatever rank the `Tiler` was) and mode-1 is the layout of those tiles. It doesn't always make sense to plot these as 2-D layouts, because the `M`-mode is now more aptly the "tile-mode" and the `N`-mode is more aptly the "rest-mode". Regardless, we still can plot the resulting layout as 2-D as shown below. 
<p align="center"> <img src="../../images/cute/divide3.png" alt="divide3.png" height="450"/> </p> We've kept each tile as its color in the previous images for clarity. Clearly, iterating across tiles is now equivalent to iterating across a row of this layout and iterating over elements within a tile is equivalent to iterating down a column of this layout. As we'll see in the `Tensor` section, this can be used to great effect in partitioning within or across tiles of data. ## Product (Tiling) Finally, we can define the product of a Layout by another Layout. In this section, we'll define `logical_product(Layout, Layout)`, which again considers all `Layout`s as 1-D functions from integers to integers, and then use that definition to create multidimensional `Layout` products. Informally, `logical_product(A, B)` results in a two mode layout where the first mode is the layout `A` and the second mode is the layout `B` but with each element replaced by a "unique replication" of layout `A`. Formally, this can be written as $A \otimes B := (A, A^* \circ B)$ and implemented in CuTe as ```cpp template <class LShape, class LStride, class TShape, class TStride> auto logical_product(Layout<LShape,LStride> const& layout, Layout<TShape,TStride> const& tiler) { return make_layout(layout, composition(complement(layout, size(layout)*cosize(tiler)), tiler)); } ``` Note that this is defined only in terms of concatenation, composition, and complement. So what is that? > where the first mode is the layout `A` This is clearly just a copy of `A`. > the second mode is the layout `B` but with each element replaced by a "unique replication" of layout `A`. The "unique replication" of layout `A` sounds like complement, `A*`, up to the cosize of `B`. As we've seen in the `complement` section, this can be described as the "layout of the repetition of `A`". If `A` is the "tile", then `A*` is the layout of repetitions that are available for `B`. ### Logical Product 1-D Example Consider reproducing the 1-D layout `A = (2,2):(4,1)` according to `B = 6:1`. Informally, this means that we have a 1-D layout of 4 elements defined by `A` and we want to reproduce it 6 times. This is computed in the three steps described in the implementation above. * Complement of `A = (2,2):(4,1)` under `6*4 = 24` is `A* = (2,3):(2,8)`. * Composition of `A* = (2,3):(2,8)` with `B = 6:1` is then `(2,3):(2,8)`. * Concatenation of `(A,A* o B) = ((2,2),(2,3)):((4,1),(2,8))`. <p align="center"> <img src="../../images/cute/product1.png" alt="product1.png" height="175"/> </p> The above figure depicts `A` and `B` as a 1-D layouts. The layout `B` describes the number and order of repetitions of `A` and they are colored for clarity. After the product, the first mode of the result is the tile of data and the second mode of the result iterates over each tile. Note that the result is identical to the result of the 1-D Logical Divide example. Of course, we can change the number and order of the tiles in the product by changing `B`. <p align="center"> <img src="../../images/cute/product2.png" alt="product2.png" height="175"/> </p> For example, in the above image with `B = (4,2):(2,1)`, there are 8 repeated tiles instead of 6 and the tiles are in a different order. ### Logical Product 2-D Example We can use the by-mode `tiler` strategies previously developed to write multidimensional products as well. 
<p align="center"> <img src="../../images/cute/product2d.png" alt="product2d.png" height="250"/> </p> The above image demonstates the use of a `tiler` to apply `logical_product` by-mode. Despite this **not being the recommended approach**, the result is a rank-2 layout consisting of 2x5 row-major block that is tiled across a 3x4 column-major arrangement. The reason **this is not the recommended approach** is that the `tiler B` in the above expression is highly unintuitive. In fact, it requires perfect knowledge of the shape and strides of `A` in order to construct. We would like to express "Tile Layout `A` according to Layout `B`" in a way that makes `A` and `B` independent and is much more intuitive. #### Blocked and Raked Products The `blocked_product(LayoutA, LayoutB)` and `raked_product(LayoutA, LayoutB)` are rank-sensitive transformations on top of 1-D `logical_product` that let us express the more intuitive `Layout` products that we most often want to express. A key observation in the implementation of these functions are the compatibility post-conditions of `logical_product`: ``` // @post rank(result) == 2 // @post compatible(layout_a, layout<0>(result)) // @post compatible(layout_b, layout<1>(result)) ``` Because `A` is always compatible with mode-0 of the result and `B` is always compatible with mode-1 of the result, if we made `A` and `B` the same rank then we could "reassociate" like-modes after the product. That is, the "column" mode in `A` could be combined with the "column" mode in `B` and the "row" mode in `A` could be combined with the "row" mode in `B`, etc. This is exactly what `blocked_product` and `raked_product` do and it is why they are called rank-sensitive. Unlike other CuTe functions that take `Layout` arguments, these care about the top-level rank of the arguments so that each mode can be reassociated after the `logical_product`. <p align="center"> <img src="../../images/cute/productblocked2d.png" alt="productblocked2d.png" height="250"/> </p> The above image shows the same result as the `tiler` approach, but with much more intuitive arguments. A 2x5 row-major layout is arranged as a tile in a 3x4 column-major arrangement. Also note that `blocked_product` went ahead and `coalesced` mode-0 for us. Similarly, `raked_product` combines the modes slightly differently. Instead of the resulting "column" mode being constructed from the `A` "column" mode then the `B` "column" mode, the resulting "column" mode is constructed from the `B` "column" mode then the `A` "column" mode. <p align="center"> <img src="../../images/cute/productraked2d.png" alt="productraked2d.png" height="250"/> </p> This results in the "tile" `A` now being interleaved or "raked" with the "layout-of-tiles" `B` instead of appearing as blocks. Other references call this a "cyclic distribution." ### Zipped and Tiled Products Similar to `zipped_divide` and `tiled_divide`, the `zipped_product` and `tiled_product` simply rearrange the modes that result from a by-mode `logical_product`. ```text Layout Shape : (M, N, L, ...) Tiler Shape : <TileM, TileN> logical_product : ((M,TileM), (N,TileN), L, ...) zipped_product : ((M,N), (TileM,TileN,L,...)) tiled_product : ((M,N), TileM, TileN, L, ...) flat_product : (M, N, TileM, TileN, L, ...) ```
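As a capstone, the snippet below is a small standalone sketch that exercises `zipped_divide` and `blocked_product` on the host. The shapes, values, and expected outputs in the comments are our own worked example (not taken from the text above), computed by hand using the algebra developed in this section.

```cpp
#include <cute/tensor.hpp>

int main() {
  using namespace cute;

  // An 8x16 column-major layout, tiled by-mode into 4x8 blocks.
  auto layout = make_layout(make_shape (Int<8>{}, Int<16>{}),
                            make_stride(Int<1>{}, Int< 8>{}));
  auto tiler  = make_shape(Int<4>{}, Int<8>{});        // equivalent to <4:1, 8:1>

  // ((TileM,TileN),(RestM,RestN)) -- we expect ((_4,_8),(_2,_2)):((_1,_8),(_4,_64))
  auto zd = zipped_divide(layout, tiler);
  print(zd); print("\n");

  // Offset of the (1,1)-th tile: 4 rows down and 8 columns across of (8,16):(1,8),
  // i.e. 4*1 + 8*8 = 68.
  print(zd(0, make_coord(1, 1))); print("\n");

  // A 2x5 row-major tile reproduced over a 3x4 column-major arrangement of tiles,
  // as in the blocked_product figure above.
  auto tile   = make_layout(make_shape(Int<2>{}, Int<5>{}), GenRowMajor{});
  auto matrix = make_layout(make_shape(Int<3>{}, Int<4>{}), GenColMajor{});
  print(blocked_product(tile, matrix)); print("\n");

  return 0;
}
```

Built against a CuTe-enabled toolchain (C++17, `-I <cutlass>/include`), the first print should match the zipped-divide profile in the comment, the tile offset should come out to 68, and the last print shows the (mode-0 coalesced) blocked product of the two rank-2 layouts.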
cutlass/media/docs/cute/02_layout_algebra.md
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Implicit GEMM API") [README](../../README.md#documentation) > **Implicit GEMM Convolution** # CUTLASS Convolution Implicit GEMM is the formulation of a convolution operation as a GEMM (generalized matrix-matrix product). Convolution takes an activation tensor and applies a sliding filter on it to produce an output tensor. ## Introduction This release of CUTLASS contains several artifacts related to convolution. - [**Implicit GEMM Algorithm**](implicit_gemm_convolution.md#implicit-gemm-algorithm) - [**CUTLASS Convolution Implementation**](implicit_gemm_convolution.md#cutlass-convolution-implementation) - [**Convolution Examples**](implicit_gemm_convolution.md#convolution-example) # Implicit GEMM Algorithm 2-D convolution may be mapped to matrix multiply by first forming a _convolution matrix_ containing elements of the activations tensor, then multiplying this by a matrix formed from the filters tensor. The earliest form of this algorithm constructs the convolution matrix explicitly via an operation conventionally referred to as `im2col`. The resulting matrix replicates each activation element by a factor equal to the filter size, consuming additional storage capacity and memory bandwidth. The _implicit GEMM_ algorithm is a variation on the blocked, hierarchical GEMM computation in CUDA. Instead of constructing the convolution matrix explicitly, it forms tiles of the convolution matrix on the fly as data are loaded from global memory into Shared Memory by carefully updating pointers and predicates. Once the convolution matrix is formed in Shared Memory, the existing warp-level GEMM components accumulate the result of convolution and update the output tensor. This section describes the structure of an efficient Implicit GEMM Convolution CUDA kernel for Turing Tensor Cores. ## Mapping Convolution to GEMM The forward convolutional layer computes an output tensor _y = conv(x, w)_ where x(NHWC), w(KRSC), and y(NPQK) are 4-D tensors. This computation may be described by the following analytic function. ``` y[n, p, q, k] = sum_c(sum_r(sum_s( x[n, f(p, r), g(q, s), c] * w[k, r, s, c] ))) ``` where functions _f_ and _g_ are defined as follows. ``` f(p, r) = p * stride_h + R - r - 1 + pad_h g(q, s) = q * stride_w + S - s - 1 + pad_w ``` A [host](/tools/util/include/cutlass/util/reference/host/convolution.h) and [device](/tools/util/include/cutlass/util/reference/device/convolution.h) reference implementation are provided in the CUTLASS Utilities. This computation may be mapped to the elements of a matrix product as follows. ``` C = gemm(A, B) ``` where - A is a row-major matrix of extent _NHW_-by-_RSC_ containing activations - B is a column-major matrix of extent _RSC_-by-_K_ containing filters - C is a row-major matrix of extent _NPQ_-by-_K_ containing the output Each element of the output matrix _Cij_ corresponds to an element in the output tensor y[n, p, q, k] according to the following relation. ``` y[n, p, q, k] = Cij ``` where ``` i = q + Q * (p + P * n) j = k ``` These relations may be inverted as follows. ``` k = j n = i / (PQ) residual = i % (PQ) p = residual / Q q = residual % Q ``` The triple loop nest iterating over CRS to accumulate the result may also be linearized and mapped to the inner GEMM _K_ dimension (not to be confused with the filter tensor dimension _K_) by the following relations. 
``` gemm_k = s + S * (r + R * c) ``` and inverse ``` c = gemm_k / (RS) residual = gemm_k % (RS) r = residual / S s = residual % S ``` Given these equations, a GEMM triple loop nest could be augmented with tensor indexing as follows. ```c++ int GEMM_M = N * P * Q; int GEMM_N = K; int GEMM_K = C * R * S; for (int gemm_i = 0; gemm_i < GEMM_M; ++gemm_i) { for (int gemm_j = 0; gemm_j < GEMM_N; ++gemm_j) { int n = gemm_i / (PQ); int npq_residual = gemm_i % (PQ); int p = npq_residual / Q; int q = npq_residual % Q; Accumulator accum = 0; for (int gemm_k = 0; gemm_k < GEMM_K; ++gemm_k) { int k = gemm_j; int c = gemm_k / (RS); int crs_residual = gemm_k % (RS); int r = crs_residual / S; int s = crs_residual % S; int h = f(p, r); int w = g(q, s); ElementA a = tensor_A.at({n, h, w, c}); ElementB b = tensor_B.at({k, r, s, c}); accum += a * b; } C[gemm_i * K + gemm_j] = accum; } } ``` The [CUTLASS GEMM implementation](/media/docs/efficient_gemm.md) explicitly iterates over tiles. Consequently, a tile iterator could be implemented to compute these functions analytically and load the appropriate elements. However, the resulting modulo arithmetic would be computationally intensive, and overhead would limit performance of a GEMM kernel targeting Turing Tensor Cores. The following section describes how an efficient implementation may be implemented within the structure of a hierarchical GEMM kernel targeting Tensor Cores. # CUTLASS Convolution Implementation To get the best performance, the following parameters are recommended. - All tensors are 128-bit aligned NHWC tensors - Channel count (C) is a multiple of 32 elements - Filter count (K) is a multiple of 32 elements This enables 128-bit vector memory acceses which lead to efficient CUDA kernels. Smaller alignment is supported even on tensor cores by setting AlignmentA and AlignmentB in `conv::kernel::DefaultConv2dFprop`, but the performance is lower than 128-bit aligned tensors. # CUTLASS Device-level Convolution Operator CUTLASS defines CUDA C++ templates accepting numerous template arguments to specialize the resulting kernel by operation, data type, tile configuration, math instruction, and fused output operation. In [turing_tensorop_conv2dfprop.cu](/examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu), a convolution operation is defined as follows. 
```c++
/// Define an Implicit GEMM convolution forward propagation (fprop) kernel
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
  ElementInputA,                                 // data type of element a (mapped to activation for fprop)
  LayoutInputA,                                  // layout of element a (mapped to activation for fprop)
  ElementInputB,                                 // data type of element b (mapped to filters for fprop)
  LayoutInputB,                                  // layout of element b (mapped to filters for fprop)
  ElementC,                                      // data type of element c (mapped to output for fprop)
  LayoutC,                                       // layout of element c (mapped to output for fprop)
  ElementAccumulator,                            // data type of internal accumulation
  MMAOp,                                         // opcode class tag
  SmArch,                                        // target SM architecture
  ThreadblockShape,                              // shape of threadblock tile
  WarpShape,                                     // shape of warp-level GEMM tile
  InstructionShape,                              // shape of target math instruction
  EpilogueOp,                                    // epilogue operator
  SwizzleThreadBlock,                            // optional function to reorder threadblocks for locality
  NumStages,                                     // number of pipeline stages in threadblock-scoped GEMM
  cutlass::arch::OpMultiplyAddSaturate,          // math operation on data of element a and b
  cutlass::conv::IteratorAlgorithm::kOptimized   // global memory iterator algorithm
>::Kernel;
```

This template is intended to be generic and cover all feasible configurations. The example specifies the following concrete data types, layouts, and tile shapes.

```c++
/// Define an Implicit GEMM convolution forward propagation (fprop) kernel
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
  cutlass::int4b_t,                              // data type of element a (mapped to activation for fprop)
  cutlass::layout::TensorNHWC,                   // layout of element a (mapped to activation for fprop)
  cutlass::int4b_t,                              // data type of element b (mapped to filters for fprop)
  cutlass::layout::TensorNHWC,                   // layout of element b (mapped to filters for fprop)
  int32_t,                                       // data type of element c (mapped to output for fprop)
  cutlass::layout::TensorNHWC,                   // layout of element c (mapped to output for fprop)
  int32_t,                                       // data type of internal accumulation
  cutlass::arch::OpClassTensorOp,                // opcode class tag
  cutlass::arch::Sm75,                           // target SM architecture
  cutlass::gemm::GemmShape<128, 128, 128>,       // shape of threadblock tile
  cutlass::gemm::GemmShape<64, 64, 128>,         // shape of warp-level GEMM tile
  cutlass::gemm::GemmShape<8, 8, 32>,            // shape of target math instruction
  cutlass::epilogue::thread::LinearCombinationClamp<
    int32_t,                                     // data type of output matrix
    8,                                           // the number of elements per vectorized
                                                 // memory access; this becomes the vector width of
                                                 // math instructions in the epilogue too
    int32_t,                                     // data type of accumulator
    float>,                                      // epilogue operator
  SwizzleThreadBlock,                            // optional function to reorder threadblocks for locality
  2,                                             // number of pipeline stages in threadblock-scoped GEMM
  cutlass::arch::OpMultiplyAddSaturate,          // math operation on data of element a and b
  cutlass::conv::IteratorAlgorithm::kOptimized   // global memory iterator algorithm
>::Kernel;
```

That is, this computes 2D convolutional forward propagation with 4-bit integer inputs and outputs (`cutlass::int4b_t`). Internal accumulation is performed using 32-bit integers (`int32_t`), and an elementwise linear combination operation is performed on the output in single-precision floating point (`float`).

The threadblock and warp-level tile shapes refer to the hierarchically blocked GEMM computation [described here](/media/docs/gemm_api.md). Larger tiles achieve greater reuse of data loaded through shared memory but launch fewer CTAs and may not fully occupy the GPU for small problem sizes.
Smaller tile configurations achieve lower peak utilizations but may better match the number of SMs within the GPU for real-world workloads. ## Launching the convolution The following code collects the arguments for an implicit GEMM operation into a structure. ```c++ // // Define arguments for CUTLASS Convolution // // mode (kCrossCorrelation or kConvolution) cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split K dimension into 1 partitions int split_k_slices = 1; cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices); typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_c.device_ref(), {options.alpha, options.beta}, }; ``` The `mode` flag indicates whether to compute cross correlation or convolution. The arguments `input_size`, `filter_size`, `padding`, `conv_stride`, and `dilation` specify the dimensions of the input and output tensors and characterize the problem size. The arguments `tensor_a.device_ref()`, `tensor_b.device_ref()`, and `tensor_c.device_ref()` are CUTLASS `TensorRef<>` objects containing a pointer to the tensor data in GPU device memory and stride values. The following code initializes and launches the Implicit GEMM operation on the device. After initializing the arguments structure, it is used to query device-side workspace requirements and allocate them in device memory if needed. Then, the Implicit GEMM object is initialized with the `arguments` structure and the workspace in device memory. This initialization step precomputes internal lookup tables used by the convolution kernel and may also clear the device-side workspace if needed. Finally, the initialized Implicit GEMM object is called, launching a kernel on the device. `tensor_c` now contains the result of the implicit GEMM. ```c++ ImplicitGemm implicit_gemm_op; // Query workspace size size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Initialize the Implicit GEMM object cutlass::Status status = implicit_gemm_op.initialize(arguments, workspace.get()); if (status != cutlass::Status::kSuccess) { /* error */ } // // Launch initialized CUTLASS kernel // status = implicit_gemm_op(); if (status != cutlass::Status::kSuccess) { /* error */ } ``` The example demonstrates how the input and output tensors may be written to a file as CSV using `cutlass::HostTensor<>` defined in the [CUTLASS Utilities](/media/docs/utilities.md). ```c++ std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; // Copy device memory to host backing store tensor_c.sync_host(); output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl; ``` ## CUTLASS Components CUTLASS defines the following CUDA C++ templates to implement Implicit GEMM Convolution which are described in greater detail in subsequent sections. **Activations tile iterators** load the activations tile into registers. 
Two implementations are provided:

- [conv2d_fprop_activation_tile_access_iterator_analytic.h](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h) computes pointer deltas and masks analytically
- [conv2d_fprop_activation_tile_access_iterator_optimized.h](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h) optimizes iterating over global memory and creating the GEMM-A tile in shared memory.

**Filter tile iterators** load filters into registers. Similarly, two implementations are provided:

- [conv2d_fprop_filter_tile_access_iterator_analytic.h](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h) computes pointer deltas and masks analytically
- [conv2d_fprop_filter_tile_access_iterator_optimized.h](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h) optimizes iterating over global memory and creating the GEMM-B tile in shared memory.

The improvements covered by the optimized iterators are:

a. precomputing kernel-invariant pointer deltas on the host,
b. computing CTA-invariant mask predicates in device-side iterator constructors, and
c. use of [fast divmod](/include/cutlass/fast_math.h) to map GEMM dimensions to convolution tensors.

For example, an _optimized_ activation iterator uses fast divmod to map GEMM _M_ to NPQ.

**Pipelined mainloop** loads threadblock-scoped tiles from global memory into shared memory and then applies CUTLASS warp-level GEMM operations to load from Shared Memory and issue instructions to Turing Tensor Cores.

- [implicit_gemm_pipelined.h](/include/cutlass/conv/threadblock/implicit_gemm_pipelined.h)

Operations for storing to shared memory and performing warp-wide matrix multiply operations using Turing Tensor Cores are applied directly from the CUTLASS GEMM components. These include the following components.

**Regular Tile Iterator** implemented in [transform::threadblock::RegularTileIterator](/include/cutlass/transform/threadblock/regular_tile_iterator.h) stores register-backed fragments to Shared Memory in permuted layouts.

**Warp-level GEMM** defined in [cutlass::gemm::warp::MmaTensorOp](/include/cutlass/gemm/warp/mma_tensor_op.h) defines tile iterators to load from Shared Memory and issue math instructions to Turing Tensor Cores. Further details are [described here](/media/docs/gemm_api.md#warp-level-matrix-multiply-api).

**Epilogue** reorders accumulator elements among threads within a threadblock to efficiently update the output tensor. It is implemented in [epilogue::threadblock::Epilogue](/include/cutlass/epilogue/threadblock/epilogue.h).

### Loading Activations and Filters

The Implicit GEMM Convolution algorithm partitions the GEMM _K_ dimension (of extent _CRS_) into threadblock tiles and assigns each threadblock tile to one filter position and an interval of channels. After iterating over all filter positions, the convolution algorithm advances to the next interval of channels and proceeds from filter `r=0, s=0`.

The matrix product of one threadblock tile is computed per iteration of the mainloop as described in the [CUTLASS GEMM implementation](/media/docs/efficient_gemm.md). To summarize, the threadblock tiles of activations and filters are loaded from tensors in global memory and stored to shared memory. Each thread within the threadblock loads one or more vectors, and together the threads span the entire tile.

The following figure illustrates one particular iteration of the Implicit GEMM mainloop.
Each thread within the threadblock is mapped to several vectors of elements in the Activations and Filters tensors. Each index in the GEMM _M_ dimension corresponds to a unique _(N,P,Q)_ index of the output tensor, and pointers may be computed based on this as well as the filter position _(r,s)_.

![ALT](/media/images/conv2d-fprop-int4.png "Convolution Forward Propagation on INT4 data.")

The CUTLASS component that embodies this functionality is [Conv2dFpropActivationTileAccessIteratorAnalytic](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h). Its constructor computes the mapping of GEMM _M_ to _(N, P, Q)_, and the `at()` method computes the offset into the Activations tensor for each memory access the thread is to perform. Additionally, the method `valid()` computes the validity of the access for each filter position and each memory access, indicating whether the access will be within the bounds of the tensor or out of bounds.

`operator++()` iterates over the memory accesses performed by a thread in both the contiguous and strided dimensions.

```c++
// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h

// Update iterator to thread's next contiguous, strided memory access
Conv2dFpropActivationTileAccessIteratorAnalytic &operator++() {
  ++iteration_contiguous_;
  if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
    return *this;
  }
  iteration_contiguous_ = 0;

  ++iteration_strided_;
  if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
    return *this;
  }
  iteration_strided_ = 0;

  return *this;
}
```

After all accesses have been visited for the current threadblock tile, `advance()` updates the pointers to the next tile. The offset added to each pointer follows the traversal of filter positions, performing one of the following:

- advance from filter position _(r, s, c)_ to filter position _(r, s+1, c)_
- advance from filter position _(r, S-1, c)_ to filter position _(r+1, 0, c)_
- advance from filter position _(R-1, S-1, c)_ to filter position _(0, 0, c+32)_

The logic within the body of `advance()` computes the above three updates for the activation GEMM-A tile.

```c++
// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h

// Advance to the next access
void advance() {
  // moves to the next tile
  ++filter_s_;
  if (filter_s_ < problem_size_.S) {
    return;
  }
  filter_s_ = 0;

  ++filter_r_;
  if (filter_r_ < problem_size_.R) {
    return;
  }
  filter_r_ = 0;

  filter_c_ += Shape::kRow * problem_size_.split_k_slices;
}
```

Similar logic holds for [Conv2dFpropFilterTileAccessIteratorAnalytic](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h).

To reduce computational overhead in the mainloop body, the pointer offsets may be precomputed in host code and provided to the CUDA kernel as a lookup table in its `Params` structure. As shown in [Conv2dFpropActivationTileAccessIteratorOptimized](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h), the logic to compute offsets from the filter position has been extracted to the `Params` constructor.

```c++
// cutlass/conv/threadblock/conv2d_params.h
struct Conv2dFpropActivationIteratorOptimizedParams<layout::TensorNHWC> {
  ...
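  // Each inc_next[] entry below is a byte offset (note the element_size_bits / 8 scaling)
  // applied to the activations pointer when moving to the next filter position. conv_sign,
  // which is set elsewhere in this constructor, is presumably +/-1 and flips the traversal
  // direction for convolution versus cross-correlation.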
  // next S
  inc_next[0] = conv_sign * (
    int64_t(layout.stride()[0]) * problem_size.dilation_w
  ) * element_size_bits / 8;

  // next R
  inc_next[1] = conv_sign * (
      int64_t(layout.stride()[1]) * problem_size.dilation_h
      - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
    ) * element_size_bits / 8;

  // next C
  inc_next[2] = (
      threadblock_shape.column() * problem_size.split_k_slices
      - conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
      - conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
    ) * element_size_bits / 8;

  ...
}
```

Only a simple lookup into the _delta table_ is then performed in device code, in `Conv2dFpropActivationTileAccessIteratorOptimized::advance()`.

```c++
// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h

CUTLASS_HOST_DEVICE
void advance() {

  int next_idx = 0;

  // moves to the next tile
  ++filter_s_;
  if (filter_s_ == problem_size_.S) {
    filter_s_ = 0;
    ++filter_r_;

    if (filter_r_ < problem_size_.R) {
      next_idx = 1;
    }
    else {
      filter_r_ = 0;
      next_idx = 2;
    }
  }

  add_byte_offset_(params_.inc_next[next_idx]);

  // in addition to Conv2dFpropActivationTileAccessIteratorAnalytic::advance()
  if (next_idx == 2) {
    filter_c_ += params_.filter_c_delta;
  }
}
```

### Making use of Tensor Cores

Turing Tensor Cores compute matrix multiply-accumulate operations efficiently by sharing data among all threads within a warp. The following operations are supported.

| **Shape** | **A**   | **B**   | **C**   |
|-----------|---------|---------|---------|
| 8x8x32    | int4b_t | int4b_t | int32_t |
| 8x8x16    | int8_t  | int8_t  | int32_t |
| 16x8x8    | half    | half    | half    |
| 16x8x8    | half    | half    | float   |

Functionally, the Turing 8x8x32 matrix multiply operation distributes the _A_, _B_, and _C_ matrices across 32 threads within a warp according to the following illustration.

![ALT](/media/images/mma-8x8x32.png "Turing Tensor Op")

This Tensor Core operation is accessible to the CUDA programmer via the PTX instruction [`mma.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-8832). CUTLASS wraps inline PTX with device-side intrinsics defined in [`cutlass/arch/mma_sm75.h`](/include/cutlass/arch/mma_sm75.h) as in the following example.

```c++
unsigned A;   // eight packed 4-bit integer elements
unsigned B;   // eight packed 4-bit integer elements

int C[2];     // two 32-bit integer elements
int D[2];     // two 32-bit integer elements

asm volatile(
  "mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
  : "=r"(D[0]), "=r"(D[1])
  : "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
```

To load data efficiently from Shared Memory into registers with the distribution among the threads of a warp matching the above, the Turing GPU architecture introduces [`ldmatrix`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-ldmatrix). `ldmatrix` is a warp-cooperative instruction: all threads contribute addresses for up to 32 row vectors, each 128 bits in length. These rows are fetched from Shared Memory and then distributed among groups of four threads per row.

The arrangement of SMEM pointers and destination registers within threads is illustrated as follows. Thread 0 is highlighted in the illustration to emphasize the mapping.

![ALT](/media/images/ldmatrix-8x128bx4.png "Turing ldmatrix PTX instruction")

The size of the Turing Tensor Core operation computing matrix multiply-accumulate on INT4 data is 8-by-8-by-32 elements.
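For illustration only, the raw PTX form of a single `ldmatrix` operation loading four 8x8 matrices of 16-bit elements is sketched below. This is a simplified sketch rather than the exact wrappers CUTLASS provides (see [`cutlass/arch/memory_sm75.h`](/include/cutlass/arch/memory_sm75.h)); the variable names are placeholders, and `smem_ptr` is assumed to already hold a shared-memory address, for example one produced by `__cvta_generic_to_shared`.

```c++
unsigned R[4];        // four 32-bit destination registers per thread
unsigned smem_ptr;    // shared-memory address contributed by this thread (assumed precomputed)

asm volatile(
  "ldmatrix.sync.aligned.m8n8.x4.shared.b16 {%0,%1,%2,%3}, [%4];\n"
  : "=r"(R[0]), "=r"(R[1]), "=r"(R[2]), "=r"(R[3])
  : "r"(smem_ptr));
```

The instruction also has `.x1` and `.x2` variants that load one or two 8x8 matrices, and an optional `.trans` qualifier for transposed loads.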
`ldmatrix` fetches up to 32 rows (or columns) per operation. Sixteen Tensor Core operations may be issued to implement a 32-by-32-by-32 matrix product and perfectly consume all data loaded by two `ldmatrix` instructions as shown in the following figure. Larger tiles are possible by increasing the number of memory instructions and issuing more Tensor Core operations, up to warp-level matrix operations of size 64-by-64-by-32. The limit is the number of registers to hold the accumulator elements.

![ALT](/media/images/ldmatrix-tensorop-32x32x32.png "Turing ldmatrix PTX instruction feeding Tensor Core operations")

### Shared Memory Layouts

In the previous two sections, we have described how data may be loaded from activations and filters tensors in global memory to compute convolution, and we have described a composition of `ldmatrix` and `mma.sync` to fetch data from Shared Memory and issue Tensor Core operations.

To keep this data movement efficient, care must be taken to avoid bank conflicts. CUTLASS uses a permuted Shared Memory layout to avoid bank conflicts when storing to Shared Memory and to efficiently load from Shared Memory using `ldmatrix`. The following figure illustrates the thread mapping used for loading the activations and filters threadblock tiles from global memory and the permuted layout in Shared Memory.

![ALT](/media/images/tensor-op-permuted-smem-layout-TN.png "Shared Memory layout used for Turing Tensor Cores")

In the illustration, one warp-wide memory access is highlighted in blue, with individual threads loading one 128-bit vector. The tile in global memory could correspond either to the activations or filters and is assumed to be 'strip-mined' with four threads loading consecutive channels. Shared Memory is visualized as a 'row-major' matrix with eight columns representing the eight 128-bit banks.

As described in the CUTLASS GTC 2019 presentation [slides](https://developer.download.nvidia.com/video/gputechconf/gtc/2019/presentation/s9593-cutensor-high-performance-tensor-operations-in-cuda-v2.pdf), [recording](https://developer.nvidia.com/gtc/2019/video/S9593), an access to Shared Memory will be conflict-free if the following conditions are satisfied across each warp:

- {T0, T1, .., T7} do not access the same 128-bit bank
- {T8, T9, .., T15} do not access the same 128-bit bank
- {T16, T17, .., T23} do not access the same 128-bit bank
- {T24, T25, .., T31} do not access the same 128-bit bank

To achieve conflict-free stores, the Shared Memory layout remaps the strip-mined arrangement to transpose the vectors and applies an XOR operation on the column index of each thread's pointer. Specifically,

```c++
int store_column = (lane_id % 8) ^ (lane_id / 8);
```

This transformation on the layout will be instrumental in reading slices of data from Shared Memory to compute the warp-level matrix multiply using Tensor Cores.

The following figure shows how the first sixteen threads participating in an `ldmatrix` instruction logically map to the c=0..31 slice of a matrix in Shared Memory. This slice is known as a "k-group" within the code because it corresponds to the same K-index of a warp-level matrix multiply.

![ALT](/media/images/tensor-op-permuted-smem-layout-TN-k0.png "Load kgroup=0 from Shared Memory using ldmatrix")

The lower half of the figure shows the physical arrangement in Shared Memory, with threads offset by row and column according to the XOR function. By inspection, we can observe there are no bank conflicts, as _T0 ...
T7_ each access unique banks, as do _T8 ... T15_, and so on.

To advance to the next "k-group" within Shared Memory, pointers are updated using an XOR operation according to the following sequence:

- **^1** advances from _k=0_ to _k=1_
- **^3** advances from _k=1_ to _k=2_
- **^1** advances from _k=2_ to _k=3_
- **^3** advances from _k=3_ to _k=0_

The first of these transitions is shown below.

![ALT](/media/images/tensor-op-permuted-smem-layout-TN-k1.png "Advance to kgroup=1 from Shared Memory using ldmatrix")

The [CUTLASS warp-level GEMM API](/media/docs/gemm_api.md#warp-level-matrix-multiply-api) defines templates for loading slices of data from permuted Shared Memory and issuing operations to Tensor Cores.

### Updating the Output Tensor

After the mainloop terminates, the accumulator tile of the warp-level GEMM stores a warp's contribution to the output tensor. However, the distribution of data among threads within the threadblock is specialized for efficient matrix multiply-accumulate operations using Tensor Cores and is not conducive to efficient, coalesced operations to Global Memory. A data rearrangement is needed.

The **Epilogue** is the component for exchanging accumulator elements through Shared Memory, loading slices of the output matrix or tensor, applying an elementwise operation such as linear scaling or bias, and storing the result to the output tensor. CUTLASS structures this as several components:

- [cutlass::epilogue::threadblock::Epilogue](/include/cutlass/epilogue/threadblock/epilogue.h) - the top-level component for looping over the entire threadblock tile
- [cutlass::epilogue::warp::TileIteratorTensorOp](/include/cutlass/epilogue/warp/tile_iterator_tensor_op.h) - a specialized component for storing Tensor Core accumulators to Shared Memory
- [cutlass::epilogue::threadblock::SharedLoadIterator](/include/cutlass/epilogue/threadblock/shared_load_iterator.h) - a component for loading elements from a row-major arrangement in Shared Memory
- [cutlass::epilogue::threadblock::PredicatedTileIterator](/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h) - a component for loading or storing matrix fragments to Global Memory (with bounds checks)
- [cutlass::epilogue::thread::LinearCombination](/include/cutlass/epilogue/thread/linear_combination.h) - an element-wise function computing `alpha * AB + beta * C` to produce the final output

## Unit Tests

Unit tests verify the functional behavior of each of the above components in a standalone CUDA kernel. This provides a convenient environment to a. inspect the template definition, b. showcase instantiation and use of these templates in device code, and c. assert functional correctness.

**Convolution unit tests**

- Device-wide convolution operator: [conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu)

**GEMM unit tests**

- Warp-scoped matrix multiply for Turing Tensor Cores: [gemm_sm75.cu](/test/unit/gemm/warp/gemm_sm75.cu)

**Epilogue unit tests**

- Epilogue for Turing Tensor Cores: [epilogue_tensor_op.cu](/test/unit/epilogue/threadblock/epilogue_tensor_op.cu)

# Convolution Example

This section describes the provided convolution example and is intended to orient the reader to the CUTLASS implementation of Implicit GEMM Convolution.

## Building and Running the Example

Example `09_turing_tensorop_conv2dfprop` computes a forward convolutional layer in which inputs and outputs are 4-bit integers.
The example source is visible in [examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu](/examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu). Before building the example, first perform the prerequisite steps for building any CUTLASS component [described here](/media/docs/quickstart.md). Compute capability 7.5 refers to the Turing architecture, and this work requires CUDA 10.2 Toolkit or later to target Turing Tensor Cores using the native `mma` [PTX instruction](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-8832). ```bash $ mkdir build && cd build $ cmake .. -DCUTLASS_NVCC_ARCHS=75 ``` To build the example, execute `make 09_turing_tensorop_conv2dfprop` from the build directory. ```bash $ make 09_turing_tensorop_conv2dfprop $ ls examples/09_turing_tensorop_conv2dfprop examples/09_turing_tensorop_conv2dfprop ``` This example provides a simple command line interface to specify the extents of 4D tensors of 4-bit integer elements (`cutlass::int4b_t`), initialize them to random values, and compute the result of a convolutional layer. Optionally, the input and output tensors may be saved to .csv files, and the CUTLASS host-side reference check may be executed to verify correctness. The complete usage statement is visible by running with `--help`: ```bash $ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --help 09_turing_tensorop_conv2dfprop example This example uses Turing's Tensor Core operators on int4 data types to compute forward convolution on tensors of layout NHWC. Options: --help If specified, displays this usage statement. --n <int> Input tensor extent N --h <int> Input tensor extent H --w <int> Input tensor extent W --c <int> Input tensor extent C --k <int> Filter extent K --r <int> Filter extent R --s <int> Filter extent S --alpha <float> Epilogue scalar alpha --beta <float> Epilogue scalar beta --ref-check If set (true), reference check on the host is computed --perf-check If set (true), performance is measured. --benchmark If set (true), performance benchmarking on several layers and batch-size. --iterations <int> Number of profiling iterations to perform. --save-workspace If set, workspace is written to a text file. --tag <string> String to replicate across the first column in the results table Examples: $ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1 $ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check ``` *Note*, this example assumes all tensors are 128b aligned and in format _NHWC_. Consequently, dimension _C_ must be divisible by 32 for activations, filters, and output. If the option `--benchmark` is passed, several layers from ResNet50 are profiled for various batch sizes. This sample output was computed on an NVIDIA RTX 2080 compiled with CUDA 10.2. ```bash build$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --benchmark ``` Convolution can also be run by the CUTLASS Profiler. # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
cutlass/media/docs/implicit_gemm_convolution.md/0
{ "file_path": "cutlass/media/docs/implicit_gemm_convolution.md", "repo_id": "cutlass", "token_count": 12488 }
50
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import ctypes import json import os import sqlite3 import subprocess import tempfile from cuda import cuda, nvrtc from cutlass_library import SubstituteTemplate import cutlass from cutlass import CACHE_FILE, CUTLASS_PATH, cuda_install_path, logger from cutlass.backend.gemm_operation import GemmOperationUniversal from cutlass.backend.library import ApiVersion from cutlass.backend.utils.device import device_cc IncludeTemplate = r"""#include "${include}" """ def compile_with_nvcc(cmd, source, error_file): succeed = True try: subprocess.check_output(cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: error_message = e.output.decode() with open(error_file, "w") as error_out: error_log = "Compilation error for the following kernel: \n" error_log += source error_log += "\nError Message:\n" error_log += error_message error_out.write(error_log) succeed = False if not succeed: # Print the error log to stdout if log level is set to warning or higher # verbosity. Otherwise, simply point to the error log file. logger.warning(error_log) raise Exception(f"Invalid Kernel. See '{error_file}' for details.") class CompilationOptions: """ Compilation options. 
""" def __init__(self, flags, arch, include_paths=[]): self.includes = [] self.include_paths = include_paths self.flags = flags self.arch = arch def get_str(self): opts = [] for flag in self.flags: opts.append(flag) for incl in self.include_paths: opts.append(f"--include-path={incl}") arch_flag = f"-arch=sm_{self.arch}" if self.arch == 90: arch_flag += "a" opts.append(arch_flag) return " ".join(opts) def get(self): options = [] for flag in self.flags: options.append(bytes(str.encode(flag))) for incl in self.include_paths: options.append(bytes(str.encode(f" --include-path={incl}"))) arch_flag = f" -arch=sm_{self.arch}" if self.arch == 90: arch_flag += "a" options.append(bytes(str.encode(arch_flag))) return options def convertToBinaryData(filename): with open(filename, "rb") as file: blobData = file.read() return blobData def CDLLBin(host_binary): tempfile.tempdir = "./" temp_so = tempfile.NamedTemporaryFile(prefix="host_func", suffix=".so", delete=True) with open(temp_so.name, "wb") as file: file.write(host_binary) host_lib = ctypes.CDLL(temp_so.name) return host_lib class ArtifactManager: """ Artifact manager """ def __init__(self) -> None: connection = sqlite3.connect(CACHE_FILE) cursor = connection.cursor() # Create the table if it does not already exist sqlite_create_table_query = """ CREATE TABLE IF NOT EXISTS compiled_operations(op_key TEXT NOT NULL UNIQUE, cubin BLOB NOT NULL, hostbin BLOB NOT NULL, op_name TEXT NOT NULL, op_attrs TEXT NOT NULL) """ cursor.execute(sqlite_create_table_query) connection.commit() cursor.close() self._nvrtc_compile_options = ["-std=c++17", "-default-device"] self._nvcc_compile_options = [ "-std=c++17", "--expt-relaxed-constexpr", "-Xcudafe --diag_suppress=esa_on_defaulted_function_ignored", ] self.nvcc() self.compiled_cache_device = {} self.compiled_cache_host = {} def nvrtc(self): self.backend = "nvrtc" self.default_compile_options = self._nvrtc_compile_options def nvcc(self): self.backend = "nvcc" self.default_compile_options = self._nvcc_compile_options def insert_operation(self, op_key, cubin, hostfile, op_name, op_attrs): connection = sqlite3.connect(CACHE_FILE) cursor = connection.cursor() sqlite_insert_blob_query = """ INSERT OR IGNORE INTO compiled_operations (op_key, cubin, hostbin, op_name, op_attrs) VALUES (?, ?, ?, ?, ?)""" hostbin = convertToBinaryData(hostfile) data_tuple = (op_key, cubin, hostbin, op_name, json.dumps(op_attrs)) cursor.execute(sqlite_insert_blob_query, data_tuple) connection.commit() cursor.close() def load_operation(self, op_key, extra_funcs): connection = sqlite3.connect(CACHE_FILE) cursor = connection.cursor() sqlite_fetch_blob_query = """SELECT * from compiled_operations where op_key = ?""" cursor.execute(sqlite_fetch_blob_query, (op_key,)) record = cursor.fetchall() if len(record) == 0: return False for row in record: key, cubin_image, host_binary, operation_name, op_attr = row op_attr = json.loads(op_attr) err, module = cuda.cuModuleLoadData(cubin_image) if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError("Cuda Error: {}".format(err)) err, kernel = cuda.cuModuleGetFunction(module, bytes(str.encode(operation_name))) self.compiled_cache_device[key] = kernel compiled_host_fns = {} host_lib = CDLLBin(host_binary) func_name = operation_name + "_get_params" func = getattr(host_lib, func_name) func.restype = ctypes.POINTER(ctypes.c_char * op_attr[0]) compiled_host_fns["get_args"] = func func_name = operation_name + "_shared_memory_size" func = getattr(host_lib, func_name) compiled_host_fns["shared_memory_capacity"] = 
func() for attr in op_attr: if isinstance(attr, str): func_name = operation_name + "_" + attr func = getattr(host_lib, func_name) # Set the return type of the function if attr in extra_funcs and extra_funcs[attr] != None: func.restype = extra_funcs[attr] compiled_host_fns[attr] = func self.compiled_cache_host[key] = compiled_host_fns return True def emit_compile_(self, operation_list, compilation_options, host_compilation_options): """ Compile a list of kernels and store them into database """ source_buffer_device = "" source_buffer_host = "" # 1. include includes = [] for operation in operation_list: for incl in operation.emitter.includes: if incl not in includes: includes.append(incl) includes_host = ["builtin_types.h", "device_launch_parameters.h", "stddef.h"] + includes for incl in includes: source_buffer_device += SubstituteTemplate( IncludeTemplate, {"include": incl}, ) for incl in includes_host: source_buffer_host += SubstituteTemplate( IncludeTemplate, {"include": incl}, ) # 2. Operations for operation in operation_list: source_buffer_device += operation.emit() source_buffer_host += operation.emit() values = { "operation_name": operation.name(), "operation_suffix": operation.emitter.operation_suffix, } source_buffer_device += SubstituteTemplate( operation.KernelTemplate, values, ) source_buffer_host += SubstituteTemplate(operation.HostTemplate, values) if self.backend == "nvrtc": # 3. compile err, program = nvrtc.nvrtcCreateProgram( str.encode(source_buffer_device), bytes(str.encode("module.cu")), 0, [], []) if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: raise RuntimeError("NVRTC Error: {}".format(err)) # Compile program options = compilation_options.get() err, = nvrtc.nvrtcCompileProgram(program, len(options), options) if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: error_string = "NVRTC Error: {}\n".format(err) # Get log from compilation err, logSize = nvrtc.nvrtcGetProgramLogSize(program) if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: raise RuntimeError("NVRTC Error: {}".format(err)) log = b" " * logSize err, = nvrtc.nvrtcGetProgramLog(program, log) if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: raise RuntimeError("NVRTC Error: {}".format(err)) raise RuntimeError(error_string + log.decode() + source_buffer_device) # Get data from compilation err, dataSize = nvrtc.nvrtcGetCUBINSize(program) if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: raise RuntimeError("NVRTC Error: {}".format(err)) cubin_image = b" " * dataSize (err,) = nvrtc.nvrtcGetCUBIN(program, cubin_image) if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: raise RuntimeError("NVRTC Error: {}".format(err)) else: # with nvcc backend # emit code tempfile.tempdir = "./" temp_cu = tempfile.NamedTemporaryFile( prefix="kernel", suffix=".cu", delete=True) temp_cubin = tempfile.NamedTemporaryFile( prefix="kernel", suffix=".cubin", delete=True) with open(temp_cu.name, "w") as file: file.write(source_buffer_device) # compile with nvcc cmd_template = "${cuda_install_path}/bin/nvcc ${options} -cubin ${srcfile} -o ${tarfile}" values = { "cuda_install_path": cuda_install_path(), "options": compilation_options.get_str(), "srcfile": temp_cu.name, "tarfile": temp_cubin.name, } cmd = SubstituteTemplate(cmd_template, values) compile_with_nvcc(cmd.split(" "), source_buffer_device, "./cutlass_python_compilation_device_error.txt") # load the cubin image with open(temp_cubin.name, "rb") as file: cubin_image = file.read() tempfile.tempdir = "./" temp_src = tempfile.NamedTemporaryFile( prefix="host_src", suffix=".cu", delete=True) # Write the host source with 
open(temp_src.name, "w") as outfile: outfile.write(source_buffer_host) temp_dst = tempfile.NamedTemporaryFile( prefix="host_func", suffix=".so", delete=True) # Set up host compilation arguments cmd = [] cmd.append(f"{cuda_install_path()}/bin/nvcc") cmd.extend(["-x", "cu", "-Xcompiler=-fpermissive", "-Xcompiler=-w", "-Xcompiler=-fPIC"]) cmd.extend(host_compilation_options.get_str().split(" ")) cmd.extend(["-shared", "-o", temp_dst.name, temp_src.name, "-lcudart", "-lcuda"]) # Comile and load the library compile_with_nvcc( cmd, source_buffer_host, error_file="./cutlass_python_compilation_host_error.txt") host_lib = ctypes.CDLL(temp_dst.name) return cubin_image, host_lib, temp_dst def add_module(self, operations, compile_options=None, bypass_cache=False): """ Insert a new compiled device module """ include_paths = [ cuda_install_path() + "/include", CUTLASS_PATH + "/include", CUTLASS_PATH + "/tools/util/include", CUTLASS_PATH + "/python/cutlass/cpp/include", ] cutlass.initialize_cuda_context() arch = device_cc() host_compile_options = CompilationOptions( self._nvcc_compile_options, arch, include_paths) if compile_options is None: compile_options = CompilationOptions( self.default_compile_options, arch, include_paths) # save the cubin operation_key = [] operation_list = [] for operation in operations: # step 1: get kernel string as key key = operation.rt_module.emit() + operation.procedural_name() + self.backend # step 1: check if the operation is in cache compiled_kernel = self.compiled_cache_device.get(key) if compiled_kernel is None and not bypass_cache: hit = self.load_operation(key, getattr( operation.rt_module, "extra_funcs", {})) if hit: compiled_kernel = self.compiled_cache_device.get(key) assert compiled_kernel is not None if compiled_kernel is not None: operation.rt_module.kernel = compiled_kernel compiled_host_fns = self.compiled_cache_host.get(key) assert compiled_host_fns is not None for key in compiled_host_fns.keys(): setattr(operation.rt_module, key, compiled_host_fns[key]) operation.rt_module.initialize() else: operation_list.append(operation.rt_module) operation_key.append(key) if len(operation_list) > 0: cubin_image, host_lib, host_file = self.emit_compile_( operation_list, compile_options, host_compile_options) err, module = cuda.cuModuleLoadData(cubin_image) if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError("Cuda Error: {}".format(err)) operation_name = [] operation_attr = [] for operation, key in zip(operation_list, operation_key): # get device kernels err, operation.kernel = cuda.cuModuleGetFunction( module, bytes(str.encode(operation.name())) ) operation_name.append(operation.name()) self.compiled_cache_device[key] = operation.kernel # get host functions compiled_host_fns = {} op_attr = [] # get param size func_name = operation.name() + "_get_param_size" func = getattr(host_lib, func_name) param_size = func() func_name = operation.name() + "_get_params" func = getattr(host_lib, func_name) func.argtype = operation.argtype func.restype = ctypes.POINTER(ctypes.c_char * param_size) setattr(operation, "get_args", func) compiled_host_fns["get_args"] = func # set shared memory size func_name = operation.name() + "_shared_memory_size" func = getattr(host_lib, func_name) setattr(operation, "shared_memory_capacity", func()) compiled_host_fns["shared_memory_capacity"] = func() # set the maximum dynamic shared size operation.initialize() # get extra functions op_attr.append(param_size) if hasattr(operation, "extra_funcs"): for suffix, ret_type in 
operation.extra_funcs.items(): func_name = operation.name() + "_" + suffix func = getattr(host_lib, func_name) if ret_type is not None: func.restype = ret_type setattr(operation, suffix, func) compiled_host_fns[suffix] = func op_attr.append(suffix) operation_attr.append(op_attr) self.compiled_cache_host[key] = compiled_host_fns for (key, operation_name, operation_attr,) in zip(operation_key, operation_name, operation_attr): self.insert_operation( key, cubin_image, host_file.name, operation_name, operation_attr)
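# A minimal usage sketch of the classes above. Assumptions: an SM80 target, the NVCC flag list
# shown, and an `operations` list of operation objects (e.g. GemmOperationUniversal) constructed
# elsewhere before being handed to add_module() for compilation and caching. The guard keeps
# module imports unaffected.
if __name__ == "__main__":
    example_manager = ArtifactManager()          # opens (or creates) the sqlite kernel cache
    example_options = CompilationOptions(
        flags=["-std=c++17", "--expt-relaxed-constexpr"],
        arch=80,                                 # assumed target: SM80
        include_paths=[cuda_install_path() + "/include",
                       CUTLASS_PATH + "/include"],
    )
    # example_manager.add_module(operations, compile_options=example_options)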
cutlass/python/cutlass/backend/compiler.py/0
{ "file_path": "cutlass/python/cutlass/backend/compiler.py", "repo_id": "cutlass", "token_count": 8423 }
51
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ DAG IR used by Python EVT """ import networkx as nx from cutlass_library import DataType from cutlass.backend.evt.ir.node import NodeBase from cutlass.backend.utils import device_cc class DAGIR: """ ``DAGIR`` is the main data structure used in the EVT Intermediate Representation. It consists of a series of ``Node`` s, each representing epilogue visitor nodes. In the DAGIR, ``node`` is an string of its name. 
``node_meta`` is the underlying class of the node """ def __init__(self, element_compute=DataType.f32, cc: int=None) -> None: # The EVT DAGIR is managed through the nextworkX Digraph class self._graph = nx.DiGraph() self.element_compute = element_compute self.reduction_names = [] self.cc = cc if cc else device_cc() # # IR manipulator # def add_node(self, meta: NodeBase): """ Add a node to dag ir """ if self.has_node(meta.name): raise SyntaxError(f"Variable '{meta.name}' cannot be defined twice.") self._graph.add_node(meta.name, meta=meta) def add_edge(self, src: str, dst: str, weight: int=0): """ Add an edge src -> dst to dag ir with weight """ if not self.has_node(src): raise SyntaxError(f"Variable '{src}' is undefined.") if not self.has_node(dst): raise SyntaxError(f"Variable '{dst}' is undefined.") self._graph.add_edge(src, dst, weight=weight) def remove_node(self, node: str): """ Remove node from dag ir """ self._graph.remove_node(node) def remove_edge(self, src: str, dst: str): """ Remove edge src -> dst """ self._graph.remove_edge(src, dst) # # Helper functions for getting attrs # def has_node(self, node: str) -> bool: """ Check if the node is in the graph """ return self._graph.has_node(node) def in_degree(self, node: str): """ Get the input degree of node """ return self._graph.in_degree(node) def in_edges(self, node: str): """ Get the input edges of node """ return [edge for edge in self._graph.in_edges(node)] def out_degree(self, node: str): """ Get the output degree of node """ return self._graph.out_degree(node) def out_edges(self, node: str): """ Get the output edges of node """ return [edge for edge in self._graph.out_edges(node)] def get_node_meta(self, node: str): """ Get the meta data of the node """ return self._graph.nodes[node]["meta"] def get_edge_weight(self, src, dst): """ Get the edge weight of edge src->dst """ return self._graph.get_edge_data(src, dst)["weight"] # # High-level helper functions # def all_reachable_nodes(self, node: str): """ Get all the nodes reachable from the current node (exclude) """ return list(nx.dfs_preorder_nodes(self._graph, source=node)) def get_users(self, node: str): """ Get all users of the current node """ return [edge[1] for edge in self.out_edges(node)] def get_all_inputs(self, node: str): """ Get all the input nodes sorted by edge weight """ in_edges = self.in_edges(node) edge_weights = [self.get_edge_weight(*edge) for edge in in_edges] return [edge[0] for _, edge in sorted(zip(edge_weights, in_edges))] def get_all_inputs_meta(self, node: str): """ Get all the input node metas sorted by edge weight """ return [self.get_node_meta(input_node) for input_node in self.get_all_inputs(node)] def replace_all_uses_with(self, node1, node2): """ Replace all uses of node1 with node2 """ for edge in self.out_edges(node1): weight = self.get_edge_weight(*edge) user = edge[1] self.add_edge(node2, user, weight) self.remove_edge(node1, user) self.remove_node(node1) # # Node accessor # def nodes_topological_order(self): """ Get the nodes in the unique lexicographical topological order It generates a unique ordering of nodes by first sorting topologically and then additionally by sorting lexicographically. Although topological_sort alone also works, this generates a unique key for each epilogue visitor pattern and ensures the compilation cache can be reused. 
:return: list[str] """ return list(nx.lexicographical_topological_sort(self._graph)) def node_metas_topological_order(self): """ Get the node metas in topological order :return: list[NodeBase] """ return [self.get_node_meta(node) for node in self.nodes_topological_order()] @property def nodes(self): """ Get all nodes :return: list[str] """ return list(self._graph.nodes) @property def nodes_meta(self): """ Get all node metas :return: list[NodeBase] """ return [data[1]['meta'] for data in self._graph.nodes.data()] @property def edges(self): """ Get all edges :return: list[(str, str)] """ return list(self._graph.edges) # # Path # def has_path(self, src: str, target: str) -> bool: """ Return True is a path exists from src to target """ return nx.has_path(self._graph, src, target)
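# A minimal usage sketch, assuming a stand-in node type with only a `name` attribute is enough
# for the graph bookkeeping above (real passes use the NodeBase subclasses from
# cutlass.backend.evt.ir). The guard keeps module imports unaffected.
if __name__ == "__main__":
    class _ExampleNode:
        def __init__(self, name):
            self.name = name

    dag = DAGIR(cc=80)                          # assumed SM80 target
    for name in ["accum", "alpha", "mul"]:
        dag.add_node(_ExampleNode(name))
    dag.add_edge("accum", "mul", weight=0)      # "accum" is input 0 of "mul"
    dag.add_edge("alpha", "mul", weight=1)      # "alpha" is input 1 of "mul"
    assert dag.get_all_inputs("mul") == ["accum", "alpha"]
    assert dag.nodes_topological_order() == ["accum", "alpha", "mul"]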
cutlass/python/cutlass/backend/evt/ir/dag_ir.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/ir/dag_ir.py", "repo_id": "cutlass", "token_count": 2967 }
52
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Preprocess the reduction nodes. The parser treats reduction as Compute(op=(reg_reduce_fn, gmem_reduce_fn)) - Store() This pass fuses these into a single store node, and then replaces all uses of the current node with the new store node. 
""" from cutlass.backend.evt.ir import ComputeNode, StoreNode from cutlass.backend.evt.passes.pass_manager import EVTPassBase class PassPreprocessRed(EVTPassBase): """ Preprocess red nodes """ def call(self): # Step 1: find the compute nodes with op=red red_compute_nodes = [] for node_meta in self.dag_ir.nodes_meta: if isinstance(node_meta, ComputeNode): if type(node_meta.fn) == tuple: # To keep the frontend simple, the reduction nodes # are parsed into compute nodes by default # The simple heuristic to distinguish between compute # and reduction node is that compute node is a single function, # while the reduction node is a tuple of functions for # in-register reduction and atomic global memory reduction red_compute_nodes.append(node_meta.name) # Step 2: for each compute, merge it with the succeeding store for node in red_compute_nodes: # Verify users = self.dag_ir.get_users(node) inputs = self.dag_ir.get_all_inputs(node) # Has a single user assert len(users) == 1 assert len(inputs) == 1 user = users[0] input = inputs[0] user_meta = self.dag_ir.get_node_meta(user) # Must be a store node assert isinstance(user_meta, StoreNode) # With output degree == 0 assert self.dag_ir.out_degree(user) == 0 # Register the reduce op node_meta = self.dag_ir.get_node_meta(node) user_meta.reg_reduce_fn, user_meta.gmem_reduce_fn = node_meta.fn user_meta.element_compute = node_meta.element_compute user_meta.round_style = node_meta.round_style # Replace all uses self.dag_ir.remove_edge(input, node) input_users = self.dag_ir.get_users(input) for iu in input_users: weight = self.dag_ir.get_edge_weight(input, iu) self.dag_ir.add_edge(user, iu, weight) self.dag_ir.remove_edge(input, iu) self.dag_ir.add_edge(input, user) self.dag_ir.remove_node(node) # Register the reduction name self.dag_ir.reduction_names.append(user)
cutlass/python/cutlass/backend/evt/passes/pass_preprocess_red.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/passes/pass_preprocess_red.py", "repo_id": "cutlass", "token_count": 1697 }
53
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utilities for emitting Conv2d kernels """ import enum import logging import os.path import shutil from string import Template try: import builtins if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True: raise ImportError("Disabling attempt to import cutlass_library") from cutlass_library.library import * from cutlass_library.conv3x_emitter import EmitConv3xInstance, EmitConv3xIncludes except ImportError: from library import * from conv3x_emitter import EmitConv3xInstance, EmitConv3xIncludes _LOGGER = logging.getLogger(__name__) ################################################################################################### # class Conv2dOperation: # def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \ stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity1, \ group_mode = GroupMode.NoneGroup): self.operation_kind = OperationKind.Conv2d self.arch = arch self.tile_description = tile_description self.conv_kind = conv_kind self.A = A self.B = B self.C = C self.element_epilogue = element_epilogue self.epilogue_functor = epilogue_functor self.iterator_algorithm = iterator_algorithm self.stride_support = stride_support self.swizzling_functor = swizzling_functor self.group_mode = group_mode # def is_complex(self): complex_operators = [ MathOperation.multiply_add_complex, MathOperation.multiply_add_complex_gaussian ] return self.tile_description.math_instruction.math_operation in complex_operators # def is_mixed_input(self): return self.A.element != self.B.element # def accumulator_type(self): accum = self.tile_description.math_instruction.element_accumulator if self.is_complex(): return 
get_complex_from_real(accum) return accum # def core_name(self): ''' The basic operation kind is prefixed with a letter indicating the accumulation type. ''' intermediate_type = '' if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp: inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) if self.tile_description.math_instruction.element_a != self.A.element and \ self.tile_description.math_instruction.element_a != self.accumulator_type(): intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] else: inst_shape = '' return "%s%s%s%s_%s" % (ShortDataTypeNames[self.accumulator_type()], \ inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm]) # def extended_name(self): ''' Append data types if they differ from compute type. ''' if self.C.element != self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${element_c}_${core_name}_${element_a}" elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${core_name}_${element_a}" else: extended_name = "${core_name}" extended_name = SubstituteTemplate(extended_name, { 'element_a': DataTypeNames[self.A.element], 'element_c': DataTypeNames[self.C.element], 'core_name': self.core_name() }) return extended_name # def layout_name(self): return "%s" % (ShortLayoutTypeNames[self.A.layout]) # def configuration_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] threadblock = self.tile_description.procedural_name() # grouped conv if self.group_mode != GroupMode.NoneGroup: group_conv_name = f"{GroupModeNames[self.group_mode]}_" else: group_conv_name = "" if self.stride_support == StrideSupport.Unity: configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_${group_conv_name}align${alignment}" else: configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${group_conv_name}align${alignment}" return SubstituteTemplate( configuration_name, { 'opcode_class': opcode_class_name, 'extended_name': self.extended_name(), 'threadblock': threadblock, 'layout': self.layout_name(), 'alignment': "%d" % self.A.alignment, 'group_conv_name': group_conv_name } ) # def procedural_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' return self.configuration_name() ################################################################################################### # # Emits single instances of a CUTLASS device-wide operator # ################################################################################################### class EmitConv2dInstance: def __init__(self): # Emitter for CUTLASS 3 convolution operations self.conv3x_emitter = EmitConv3xInstance() self.template = """ // Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" using ${operation_name}_base = typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>, ${stages}, ${math_operator}, ${iterator_algorithm}, ${stride_support}, ${align_a}, ${align_b} >::Kernel; """ self.template_group_conv = """ // Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" using ${operation_name}_base = typename cutlass::conv::kernel::DefaultConv2dGroup${conv_kind_name}< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>, ${stages}, ${math_operator}, ${group_mode}, ${iterator_algorithm}, ${stride_support}, ${align_a}, ${align_b} >::Kernel; """ self.template_depthwise_direct_conv = """ // Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" using ${operation_name}_base = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConv${conv_kind_name}< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::conv::TensorNHWCShape<${threadblock_output_shape_n}, ${threadblock_output_shape_p}, ${threadblock_output_shape_q}, ${groups_per_cta}>, cutlass::MatrixShape<${filter_shape_r}, ${filter_shape_s}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue}, cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling >, cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< 1, ${threadblock_output_shape_n}, ${threadblock_output_shape_p}, ${threadblock_output_shape_q}>, ${stages}, ${math_operator}, ${iterator_algorithm}, 
${stride_support}, cutlass::MatrixShape<${stride_r}, ${stride_s}>, cutlass::MatrixShape<${dilation_r}, ${dilation_s}> >::Kernel; """ def arch_number_to_type(self, arch: int): return f"cutlass::arch::Sm{arch}" def emit(self, operation): _LOGGER.debug("*** EmitConv2dInstance::emit") _LOGGER.debug("*** operation: procedural_name()=" + operation.procedural_name()) if hasattr(operation, 'is_3x') and operation.is_3x: _LOGGER.debug("*** CUTLASS 3 operation") return self.conv3x_emitter.emit(operation) _LOGGER.debug("*** CUTLASS 2 operation") warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)] epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) values = { 'operation_name': operation.procedural_name(), 'conv_kind': ConvKindTag[operation.conv_kind], 'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm], 'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(), 'stride_support': StrideSupportTag[operation.stride_support], 'math_operator': 'cutlass::arch::OpMultiplyAddComplex' if operation.is_complex() else \ MathOperationTag[operation.tile_description.math_instruction.math_operation], 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), } if operation.group_mode == GroupMode.NoneGroup: _LOGGER.debug("*** group_mode=NoneGroup") return SubstituteTemplate(self.template, values) elif operation.group_mode == GroupMode.Depthwise: _LOGGER.debug("*** group_mode=Depthwise") values['group_mode'] = GroupModeTag[operation.group_mode] # Setup other template params values['threadblock_output_shape_n'] = str(operation.tile_description.threadblock_output_shape[0]) values['threadblock_output_shape_p'] = str(operation.tile_description.threadblock_output_shape[1]) values['threadblock_output_shape_q'] = str(operation.tile_description.threadblock_output_shape[2]) values['groups_per_cta'] = 
str(operation.tile_description.threadblock_output_shape[3]) values['filter_shape_r'] = str(operation.tile_description.filter_shape[0]) values['filter_shape_s'] = str(operation.tile_description.filter_shape[1]) values['stride_r'] = str(operation.tile_description.stride[0]) values['stride_s'] = str(operation.tile_description.stride[1]) values['dilation_r'] = str(operation.tile_description.dilation[0]) values['dilation_s'] = str(operation.tile_description.dilation[1]) return SubstituteTemplate(self.template_depthwise_direct_conv, values) else: _LOGGER.debug("*** group_mode=" + GroupModeTag[operation.group_mode]) values['group_mode'] = GroupModeTag[operation.group_mode] return SubstituteTemplate(self.template_group_conv, values) ################################################################################################### # # Generator functions for all layouts # ################################################################################################### # def GenerateConv2dTensorOp(manifest, tile_descriptions, min_cc, align = 128): _LOGGER.debug("*** GenerateConv2dTensorOp") for tile in tile_descriptions: for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]: if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]): # output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \ if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \ else [tile.math_instruction.element_accumulator,] for output_type in output_types: A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_a])) B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_b])) C = TensorDescription(output_type, LayoutType.TensorNHWC, max(1, int(align / DataTypeSize[output_type]))) manifest.append(Conv2dOperation(conv_kind, min_cc, tile, A, B, C, tile.math_instruction.element_accumulator)) class EmitConv2dIncludes: '''Emit includes that are specific to the operation.''' def __init__(self): self.includes = ['conv2d_operation.h'] self.emitter_3x = EmitConv3xIncludes() def operation_is_3x(self, operation) -> bool: """Whether operation is a CUTLASS 3 convolution (as opposed to CUTLASS 2)""" return hasattr(operation, 'is_3x') and operation.is_3x def emit(self, operation) -> str: if self.operation_is_3x(operation): return self.emitter_3x.emit(operation) return '\n'.join(f"#include \"{incl}\"" for incl in self.includes) + \ "\n\n///////////////////////////////////////////////////////////////////////////////////////////////////" ################################################################################################### # # Emitters functions for all targets # ################################################################################################### class EmitConv2dConfigurationLibrary: def __init__(self, operation_path, configuration_name): self.configuration_name = configuration_name self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name) self.instance_emitter = EmitConv2dInstance() self.includes_emitter = EmitConv2dIncludes() self.header_template = """ /* Generated by conv2d_operation.py - Do not edit. 
*/ /////////////////////////////////////////////////////////////////////////////////////////////////// #include "cutlass/cutlass.h" #include "cutlass/library/library.h" #include "cutlass/library/manifest.h" #include "library_internal.h" """ self.instance_template = """ ${stub_begin} ${operation_instance} // Derived class struct ${operation_name} : public ${operation_name}_base { }; ${stub_end} /////////////////////////////////////////////////////////////////////////////////////////////////// """ self.configuration_header = """ namespace cutlass { namespace library { // Initialize all instances void initialize_${configuration_name}(Manifest &manifest) { """ self.configuration_instance = """${stub_begin} using Operation_${operation_name} = cutlass::conv::device::${kernel_name}< ${operation_name}>; manifest.append(new cutlass::library::${operation_wrapper}< Operation_${operation_name} >( "${operation_name}" )); ${stub_end} """ self.configuration_epilogue = "}\n" self.epilogue_template = """ /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// """ def operation_is_3x(self, operation): """Whether operation is a CUTLASS 3 convolution (as opposed to CUTLASS 2)""" return hasattr(operation, 'is_3x') and operation.is_3x def __enter__(self): """ Open the configuration_file, and write the "header" C++ code to it. The "header" consists of a comment (that this is generated code, so it should not be edited), and includes that are common to all kinds of kernels. """ _LOGGER.debug('*** EmitConv2dConfigurationLibrary::__enter__') _LOGGER.debug('*** configuration_path (file to write): ' + str(self.configuration_path)) _LOGGER.debug('*** configuration_name: ' + self.configuration_name) self.configuration_file = open(self.configuration_path, "w") self.configuration_file.write(SubstituteTemplate(self.header_template, { 'configuration_name': self.configuration_name })) self.operations = [] return self def emit(self, operation): """ Write three pieces of C++ code to the configuration_file (that was opened by the __enter__ method above): 1. the header includes that are specific to the operation (CUTLASS 2 vs. CUTLASS 3); 2. the "operation instance" (a "using" declaration ending in "_base"); and 3. the "operation name" (declaration and definition of a derived class of the above operation instance). The "using" declaration turns a C++ class name, possibly namespace-qualified, possibly also with angle brackets, into a C-style, easily demangled identifier. """ _LOGGER.debug('*** EmitConv2dConfigurationLibrary::emit') _LOGGER.debug('*** operation.procedural_name(): ' + operation.procedural_name()) self.operations.append(operation) self.configuration_file.write(self.includes_emitter.emit(operation)) stub_begin = '' stub_end = '' # It can be useful to stub (comment) out instantiations for testing. # In this case, one need only set is_stub to True. 
is_stub = False if is_stub: stub_begin = "// STUB for now\n#if 0" stub_end = '#endif // 0' self.configuration_file.write(Template(self.instance_template).substitute({ 'configuration_name': self.configuration_name, 'operation_name': operation.procedural_name(), 'operation_instance': self.instance_emitter.emit(operation), 'stub_begin': stub_begin, 'stub_end': stub_end })) def __exit__(self, exception_type, exception_value, traceback): """ Write the rest of the C++ code to the configuration_file, and close the file. The "rest of the C++ code" has the following components. 1. Configuration header: Open the namespace(s), and open the definition of the "initialize_${configuration_name}" registration function that registers the operation with the Manifest. ("Registration" helps turn C++ compile-time polymorphism (via template parameters) into a run-time choice of parameters.) 2. Configuration instance: In the body of the registration function, make a "using" declaration Operation_${operation_name} for the operation type (which uses operation_name as its template argument). Then, tell the manifest about the operation via a "manifest.append" call. The argument of the call is a new instance of "SomethingOperation<Operation_${operation_name}>" (replace Something with a specific name). 3. Configuration epilogue: Close the definition of the registration function. 4. Epilogue template: Close the namespace(s). """ _LOGGER.debug('*** EmitConv2dConfigurationLibrary::__exit__') _LOGGER.debug('*** configuration_path (file to write): ' + str(self.configuration_path)) _LOGGER.debug('*** configuration_name: ' + self.configuration_name) self.configuration_file.write(SubstituteTemplate(self.configuration_header, { 'configuration_name': self.configuration_name })) for operation in self.operations: stub_begin = '' stub_end = '' # It can be useful to stub (comment) out instantiations for testing. # In this case, one need only set is_stub to True. is_stub = False if is_stub: stub_begin = "// STUB for now\n#if 0" stub_end = "#endif // 0" if operation.group_mode == GroupMode.Depthwise: kernel_name = 'DirectConvolution' operation_wrapper = 'DirectConv2dOperation' else: kernel_name = 'ImplicitGemmConvolution' operation_wrapper = 'Conv2dOperation' if self.operation_is_3x(operation): kernel_name = 'ConvUniversalAdapter' operation_wrapper = 'ConvOperation3x' self.configuration_file.write(SubstituteTemplate(self.configuration_instance, { 'configuration_name': self.configuration_name, 'operation_name': operation.procedural_name(), 'kernel_name': kernel_name, 'operation_wrapper': operation_wrapper, 'stub_begin': stub_begin, 'stub_end': stub_end })) self.configuration_file.write(self.configuration_epilogue) self.configuration_file.write(self.epilogue_template) self.configuration_file.close() ################################################################################################### ###################################################################################################
cutlass/python/cutlass_library/conv2d_operation.py/0
{ "file_path": "cutlass/python/cutlass_library/conv2d_operation.py", "repo_id": "cutlass", "token_count": 8651 }
54
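The configuration_name() method in conv2d_operation.py above builds kernel names by template substitution over fields such as opcode_class, extended_name, threadblock, layout, and alignment. The following minimal sketch reproduces that step with Python's standard string.Template in place of SubstituteTemplate; the placeholder values are purely illustrative (hypothetical), not values emitted by the real generator, which derives them from the tile description and tensor descriptions.

from string import Template

def substitute(template: str, values: dict) -> str:
    # Stand-in for SubstituteTemplate from cutlass_library.library
    return Template(template).safe_substitute(values)

# Non-unity-stride, non-grouped template from configuration_name() above
configuration_name = ("cutlass_${opcode_class}_${extended_name}_${threadblock}"
                      "_${layout}_${group_conv_name}align${alignment}")

# Illustrative placeholder values only
example_values = {
    'opcode_class': 'tensorop',
    'extended_name': 'f16_fprop_optimized',
    'threadblock': '128x128_32x3',
    'layout': 'nhwc',
    'group_conv_name': '',
    'alignment': '8',
}

print(substitute(configuration_name, example_values))
# cutlass_tensorop_f16_fprop_optimized_128x128_32x3_nhwc_align8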
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Tests the high-level Conv2d interface """ from math import ceil import unittest import cutlass import cutlass.utils.datatypes as datatypes from cutlass.backend.utils.device import device_cc from utils import ExpectException import os class Conv2dEquivalence: """ Helper class for testing the equivalence of different constructions of the Conv2d interface """ def __init__(self, conv_kind, element_A, element_B, element_C, element_D, element_accumulator, alignment_A, alignment_B, alignment_C): self.element_A = element_A self.element_B = element_B self.element_C = element_C self.element_D = element_D self.element_accumulator = element_accumulator self.alignment_A = alignment_A self.alignment_B = alignment_B self.alignment_C = alignment_C self.conv_kind = conv_kind self.plan = cutlass.op.Conv2d( kind=self.conv_kind, element_A=element_A, element_B=element_B, element_C=element_C, element_D=element_D, element_accumulator=element_accumulator) self.op = self.plan.construct( alignment_A=self.alignment_A, alignment_B=self.alignment_B, alignment_C=self.alignment_C) def _plans_equal(self, other_plan) -> bool: """ Compares whether two plans are equal :param other_plan: plan to compare against the default Conv2d :type other_plan: cutlass.op.Conv2d :return: whether `other_plan` is equivalent to `self.plan` :rtype: bool """ other_op = other_plan.construct( alignment_A=self.alignment_A, alignment_B=self.alignment_B, alignment_C=self.alignment_C) return self.op.rt_module.emit() == other_op.rt_module.emit() def generic_test(self): """ Tests the equivalence of various constructions of the Conv2d interface when using CUTLASS data types and layouts for constructing the Conv2d interface """ if not datatypes.is_numpy_available(): return # Test when 
specifying all parameters plan_other = cutlass.op.Conv2d( kind=self.conv_kind, element_A=self.element_A, element_B=self.element_B, element_C=self.element_C, element_D=self.element_D, element_accumulator=self.element_accumulator) assert self._plans_equal(plan_other) # Test when specifying all parameters but A plan_other = cutlass.op.Conv2d( kind=self.conv_kind, element_B=self.element_B, element_C=self.element_C, element_D=self.element_D, element_accumulator=self.element_accumulator, element=self.element_A) assert self._plans_equal(plan_other) # Test when specifying all parameters but A and B as tensors using generic element and output plan_other = cutlass.op.Conv2d( kind=self.conv_kind, element_C=self.element_C, element_D=self.element_D, element_accumulator=self.element_accumulator, element=self.element_A) assert self._plans_equal(plan_other) # Test without explicit accumulator. Only run if the type of C and the accumulator are equal if self.element_C == self.element_accumulator: plan_other = cutlass.op.Conv2d( kind=self.conv_kind, element_C=self.element_C, element_D=self.element_D, element=self.element_A) assert self._plans_equal(plan_other) # Test with only the generic types. Only rune if the types of A, B, C, and D are the same if (self.element_A == self.element_B and self.element_A == self.element_C and self.element_A == self.element_D and self.element_A == self.element_accumulator): plan_other = cutlass.op.Conv2d(kind=self.conv_kind, element=self.element_A) assert self._plans_equal(plan_other) def numpy_test(self): """ Tests the equivalence of various constructions of the Conv2d interface when using numpy as a frontend """ if not datatypes.is_numpy_available(): return import numpy as np type_A = datatypes.numpy_type(self.element_A) type_B = datatypes.numpy_type(self.element_B) type_C = datatypes.numpy_type(self.element_C) type_D = datatypes.numpy_type(self.element_D) type_accum = datatypes.numpy_type(self.element_accumulator) size = (2, 2) A = np.zeros(size, dtype=type_A) B = np.zeros(size, dtype=type_B) C = np.zeros(size, dtype=type_C) D = np.zeros(size, dtype=type_D) return self.tensor_test(type_A, type_B, type_C, type_D, type_accum, A, B, C, D) def torch_test(self): """ Tests the equivalence of various constructions of the Conv2d interface when using torch as a frontend """ if not datatypes.is_torch_available(): return import torch type_A = datatypes.torch_type(self.element_A) type_B = datatypes.torch_type(self.element_B) type_C = datatypes.torch_type(self.element_C) type_D = datatypes.torch_type(self.element_D) type_accum = datatypes.torch_type(self.element_accumulator) size = (2, 2) A = torch.empty(size, dtype=type_A) B = torch.empty(size, dtype=type_B) C = torch.empty(size, dtype=type_C) D = torch.empty(size, dtype=type_D) return self.tensor_test(type_A, type_B, type_C, type_D, type_accum, A, B, C, D) def tensor_test(self, type_A, type_B, type_C, type_D, type_accum, A, B, C, D): # Test when specifying all parameters via tensors plan_np = cutlass.op.Conv2d(kind=self.conv_kind, A=A, B=B, C=C, D=D, element_accumulator=type_accum) assert self._plans_equal(plan_np) # Test when specifying all parameters but A as tensors plan_np = cutlass.op.Conv2d(kind=self.conv_kind, B=B, C=C, D=D, element_accumulator=type_accum, element_A=type_A) assert self._plans_equal(plan_np) # Test when specifying all parameters but A and B as tensors and using generic element and output if type_A == type_B: plan_np = cutlass.op.Conv2d(kind=self.conv_kind, C=C, D=D, element_accumulator=type_accum, 
element=type_A) assert self._plans_equal(plan_np) # Test without explicit accumulator. Only run if the type of C and the accumulator. if type_C == type_accum: plan_np = cutlass.op.Conv2d(kind=self.conv_kind, A=A, B=B, C=C, D=D) assert self._plans_equal(plan_np) # Test with only the generic types and layouts. Only run if types and layouts of A, B, C, and D are the same. if (type_A == type_B and type_A == type_C and type_A == type_D and type_A == type_accum): plan_np = cutlass.op.Conv2d(kind=self.conv_kind, element=type_A) assert self._plans_equal(plan_np) def test_all(self): """ Runs all tests on the Gemm interface """ self.generic_test() self.numpy_test() self.torch_test() @unittest.skipIf(device_cc() <= 80, 'Device compute capability is insufficient for SM80 tests.') class ConvEquivalenceTest(unittest.TestCase): """ Tests the equivalence of different constructions of the Conv2d interface """ pass type2alignment = { cutlass.DataType.f16: 8, cutlass.DataType.f32: 4 } def add_test(conv_kind, element_A, element_B, element_C, element_D, element_accumulator): test_name = f"test_conv2d_{conv_kind}_{element_A}_{element_B}_{element_C}_{element_D}_{element_accumulator}" def run(self): conv2d_eq = Conv2dEquivalence( conv_kind=conv_kind, element_A=element_A, element_B=element_B, element_C=element_C, element_D=element_D, element_accumulator=element_accumulator, alignment_A=type2alignment[element_A], alignment_B=type2alignment[element_B], alignment_C=type2alignment[element_C] ) conv2d_eq.test_all() setattr(ConvEquivalenceTest, test_name, run) for conv_kind in ["fprop", "wgrad", "dgrad"]: for types in [ [cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16], [cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32], [cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f16], [cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32], [cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32] ]: add_test(conv_kind, types[0], types[1], types[2], types[3], types[4]) @unittest.skipIf(device_cc() <= 80, 'Device compute capability is insufficient for SM80 tests.') class Conv2dErrorTests(unittest.TestCase): """ Tests various error scenarios that arise with the high-level Gemm interface """ def test_alignment(self): """ Tests case in which the alignment specified is unsupported """ plan = cutlass.op.Conv2d(kind="fprop", element=cutlass.DataType.f16) with ExpectException(True, 'Alignment 3 is not supported for F16. The construction should fail.'): op = plan.construct(alignment_A=3, alignment_B=3, alignment_C=3) def test_invalid_tile_description(self): """ Tests scenarios in which an invalid tile description is provided for a given CC """ plan = cutlass.op.Conv2d(kind="fprop", element=cutlass.DataType.f16) td = plan.tile_descriptions()[0] td.threadblock_shape=[17, 32, 5] plan.tile_description = td with ExpectException(True, 'The threadblock shape is invalid. The compilation should fail.'): plan.compile() # Clean up the error message os.remove("./cutlass_python_compilation_device_error.txt") if __name__ == '__main__': unittest.main()
cutlass/test/python/cutlass/interface/conv2d_interface.py/0
{ "file_path": "cutlass/test/python/cutlass/interface/conv2d_interface.py", "repo_id": "cutlass", "token_count": 4946 }
55
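As a concrete illustration of what Conv2dEquivalence.generic_test() above asserts, the sketch below (assuming the cutlass Python package and a sufficiently recent GPU are available; it is not run here) constructs the same fprop operation twice — once with every element type spelled out, and once using the generic `element` shorthand for A and B — and checks that both plans emit the same kernel.

import cutlass

# Fully explicit construction
plan_a = cutlass.op.Conv2d(
    kind="fprop",
    element_A=cutlass.DataType.f16,
    element_B=cutlass.DataType.f16,
    element_C=cutlass.DataType.f32,
    element_D=cutlass.DataType.f32,
    element_accumulator=cutlass.DataType.f32,
)

# Same operation, using the generic `element` shorthand for A and B
plan_b = cutlass.op.Conv2d(
    kind="fprop",
    element=cutlass.DataType.f16,
    element_C=cutlass.DataType.f32,
    element_D=cutlass.DataType.f32,
    element_accumulator=cutlass.DataType.f32,
)

# Alignments follow the type2alignment table above (f16 -> 8, f32 -> 4)
op_a = plan_a.construct(alignment_A=8, alignment_B=8, alignment_C=4)
op_b = plan_b.construct(alignment_A=8, alignment_B=8, alignment_C=4)
assert op_a.rt_module.emit() == op_b.rt_module.emit()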
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Implicit GEMM testbed sizes for Conv2d problem */ #pragma once #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/aligned_buffer.h" #include "cutlass/numeric_types.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" namespace test { namespace conv { namespace device { using Conv3dProblemVector = std::vector<cutlass::conv::Conv3dProblemSize>; //////////////////////////////////////////////////////////////////////////// /// Structure TestbedConv3dProblemSizes initializes and holds conv default and /// important network sizes //////////////////////////////////////////////////////////////////////////// struct TestbedConv3dProblemSizes { // // Data members // int minimum_channel_size; Conv3dProblemVector conv3d_default_sizes; Conv3dProblemVector conv3d_vnet_medical_sizes; // // Methods // /// Default ctor TestbedConv3dProblemSizes(int minimum_channel_size_ = 64): minimum_channel_size (minimum_channel_size_) { initialize_conv3d_default_sizes(); initialize_conv3d_vnet_medical_sizes(conv3d_vnet_medical_sizes, 1 /*batch-size*/); filter_all(); } /// Eliminates some illegal cases void filter_all() { Conv3dProblemVector *problems_vectors[] = { &conv3d_default_sizes, &conv3d_vnet_medical_sizes }; for (Conv3dProblemVector *problems : problems_vectors) { Conv3dProblemVector filtered; for (cutlass::conv::Conv3dProblemSize const & problem : *problems) { if (!(problem.C % minimum_channel_size)) { filtered.push_back(problem); } } *problems = filtered; } } // Add a few standard convolution problem sizes void initialize_conv3d_default_sizes() { conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 3, 3, minimum_channel_size}, // input size (NDHWC) {8, 1, 1, 1, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 1, 8, minimum_channel_size}, // input size (NDHWC) {8, 1, 1, 3, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 1, 8, minimum_channel_size}, // input size (NDHWC) {8, 1, 1, 3, minimum_channel_size}, // filter size (KTRSC) CUTLASS_STL_NAMESPACE::make_tuple( cutlass::Coord<3>({1, 1, 1}), // near padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({0, 0, 0}) // far padding (pad_d, pad_h, pad_w) ), cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 8, 8, 8, minimum_channel_size}, // input size (NDHWC) {8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // 
dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 8, 8, 8, minimum_channel_size}, // input size (NDHWC) {8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC) CUTLASS_STL_NAMESPACE::make_tuple( cutlass::Coord<3>({1, 1, 1}), // near padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({0, 0, 0}) // far padding (pad_d, pad_h, pad_w) ), cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 16, 16, 16, minimum_channel_size}, // input size (NDHWC) {8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 15, 19, 160}, // input size (NDHWC) {224, 1, 3, 6, 160}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 2, 1, 1, minimum_channel_size}, // input size (NDHWC) {8, 2, 1, 1, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 7, 7, minimum_channel_size}, // input size (NDHWC) {16, 1, 3, 3, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 11, 15, 19, 64}, // input size (NDHWC) {32, 4, 3, 6, 64}, // filter size (KTRSC) cutlass::Coord<3>({2, 1, 3}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); } // Add vnet layers to unit testing sizes void initialize_conv3d_vnet_medical_sizes(Conv3dProblemVector &conv3d_problem_vector, int batch_size = 1) { conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 32, 32, 32, 16}, // input size (NDHWC) {32, 2, 2, 2, 16}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 16, 16, 16, 32}, // input size (NDHWC) {32, 3, 3, 3, 32}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 16, 16, 16, 32}, // input size (NDHWC) {64, 2, 2, 2, 32}, // filter size (KTRSC) 
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 8, 8, 8, 64}, // input size (NDHWC) {64, 3, 3, 3, 64}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 8, 8, 8, 64}, // input size (NDHWC) {128, 2, 2, 2, 64}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 4, 4, 4, 128}, // input size (NDHWC) {128, 3, 3, 3, 128}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 8, 8, 8, 128}, // input size (NDHWC) {128, 3, 3, 3, 128}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 16, 16, 16, 64}, // input size (NDHWC) {64, 3, 3, 3, 64}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 32, 32, 32, 16}, // input size (NDHWC) {64, 2, 2, 2, 16}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 16, 16, 16, 32}, // input size (NDHWC) {128, 2, 2, 2, 32}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); } }; } // namespace device } // namespace conv } // namespace test
cutlass/test/unit/conv/device/conv3d_problems.h/0
{ "file_path": "cutlass/test/unit/conv/device/conv3d_problems.h", "repo_id": "cutlass", "token_count": 6131 }
56
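A useful property of the V-Net sizes above is that consecutive layers chain: the output extents of one problem equal the input extents of the next. The helper below, offered as a sketch, checks this with the usual convolution arithmetic using explicit near/far padding (which the near/far padding overload above suggests); the exact convention of cutlass::conv::Conv3dProblemSize should be confirmed against cutlass/conv/conv3d_problem_size.h before relying on it.

def conv_out_extent(extent, filt, pad_near, pad_far, stride, dilation=1):
    # Standard convolution output-extent formula
    return (extent + pad_near + pad_far - dilation * (filt - 1) - 1) // stride + 1

# {1, 32, 32, 32, 16} input with a {32, 2, 2, 2, 16} filter, zero padding, stride 2:
z = conv_out_extent(32, 2, 0, 0, 2)
p = conv_out_extent(32, 2, 0, 0, 2)
q = conv_out_extent(32, 2, 0, 0, 2)
assert (z, p, q) == (16, 16, 16)  # matches the 16x16x16 input of the next V-Net layer

# {1, 16, 16, 16, 32} input with a {32, 3, 3, 3, 32} filter, padding 1, stride 1:
assert conv_out_extent(16, 3, 1, 1, 1) == 16  # spatial extent preserved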
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief Unit tests for the bfloat16_t numeric type: host- and device-side
           conversion from float, arithmetic, and round-to-nearest-even rounding.
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/core_io.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void convert_bf16_f32(cutlass::bfloat16_t *output, float const *input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]); } } __global__ void convert_and_pack_bf16(cutlass::bfloat16_t *output, float const *input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid * 2 < N) { cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert; cutlass::Array<cutlass::bfloat16_t, 2> *dst_ptr = reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2> *>(output + tid * 2); cutlass::Array<float, 2> const *src_ptr = reinterpret_cast<cutlass::Array<float, 2> const *>(input + tid * 2); *dst_ptr = convert(*src_ptr); } } TEST(bfloat16_t, device_conversion) { using T = cutlass::bfloat16_t; using S = float; int const N = 256; cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1}); cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1}); for (int i = 0; i < N; ++i) { source.at({i, 0}) = float(i - 128); destination.at({i, 0}) = T(0); } source.sync_device(); destination.sync_device(); convert_bf16_f32<<< dim3(1,1), dim3(N, 1) >>>(destination.device_data(), source.device_data(), N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; destination.sync_host(); int errors = 0; for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Basic conversion error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } destination.at({i, 0}) = T(0); } destination.sync_device(); convert_and_pack_bf16<<< dim3(1,1), dim3(N, 1) >>>(destination.device_data(), source.device_data(), N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; destination.sync_host(); for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Convert and pack error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } } EXPECT_EQ(errors, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Host // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(bfloat16_t, host_conversion) { for (int i = -128; i < 128; ++i) { float f = static_cast<float>(i); cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f); EXPECT_TRUE(static_cast<int>(x) == i); EXPECT_TRUE(static_cast<float>(y) == f); } // Try out default-ctor (zero initialization of primitive proxy type) EXPECT_TRUE(cutlass::bfloat16_t() == 0.0_bf16); // Try out user-defined literals EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16); EXPECT_TRUE(7 == static_cast<int>(7_bf16)); } TEST(bfloat16_t, host_arithmetic) { for (int i = -100; i < 100; ++i) { for (int j = -100; j < 100; ++j) { cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(j); EXPECT_TRUE(static_cast<int>(x + y) == (i + j)); 
} } } TEST(bfloat16_t, host_round) { struct { uint32_t f32_bits; uint16_t expected; } tests[] = { {0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz {0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz {0x40040001, 0x4004}, // M=0, R=1, S=1 => +inf {0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf {0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf {0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz {0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz {0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf {0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf {0x7f800000, 0x7f80}, // +inf {0xff800000, 0xff80}, // -inf {0x7fffffff, 0x7fff}, // canonical NaN {0x7ff00001, 0x7fff}, // NaN -> canonical NaN {0xfff00010, 0x7fff}, // Nan -> canonical NaN {0, 0} }; bool running = true; for (int i = 0; running; ++i) { float f32 = reinterpret_cast<float const &>(tests[i].f32_bits); cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32); bool passed = (tests[i].expected == bf16.raw()); EXPECT_TRUE(passed) << "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits << ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x" << std::hex << bf16.raw(); if (!tests[i].f32_bits) { running = false; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Device // /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/core/bfloat16.cu/0
{ "file_path": "cutlass/test/unit/core/bfloat16.cu", "repo_id": "cutlass", "token_count": 2746 }
57
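The host_round test above encodes the float-to-bfloat16_t conversion's round-to-nearest-even rule in terms of the last kept mantissa bit (M), the round bit (R), and the sticky bits (S). The Python sketch below mirrors that rule on raw IEEE-754 bit patterns purely as a reading aid; the authoritative behavior is the float constructor of cutlass::bfloat16_t.

def f32_bits_to_bf16_bits(x: int) -> int:
    # NaN maps to the canonical bf16 NaN, as in the table above
    if (x & 0x7f800000) == 0x7f800000 and (x & 0x007fffff) != 0:
        return 0x7fff
    m = (x >> 16) & 1            # last kept mantissa bit
    r = (x >> 15) & 1            # round bit (first dropped bit)
    s = int((x & 0x7fff) != 0)   # sticky: any of the remaining dropped bits
    round_up = 1 if (r and (s or m)) else 0
    return ((x >> 16) + round_up) & 0xffff

assert f32_bits_to_bf16_bits(0x40048000) == 0x4004  # M=0, R=1, S=0 -> ties to even
assert f32_bits_to_bf16_bits(0x40058000) == 0x4006  # M=1, R=1, S=0 -> rounds up
assert f32_bits_to_bf16_bits(0x4004c000) == 0x4005  # R=1, S=1      -> rounds up
assert f32_bits_to_bf16_bits(0x7f800000) == 0x7f80  # +inf preserved
assert f32_bits_to_bf16_bits(0x7fffffff) == 0x7fff  # NaN -> canonical NaN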
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <cute/tensor.hpp> #include "../cooperative_gemm_common.hpp" using namespace cute; TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA) { using value_type = float; constexpr uint32_t m = 64; constexpr uint32_t n = 32; constexpr uint32_t k = 16; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>, Layout<Shape<_16, _8, _1>> >; test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication) { using value_type = float; constexpr uint32_t m = 88; constexpr uint32_t n = 20; constexpr uint32_t k = 12; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>, Layout<Shape<_2, _64, _1>> >; test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication2) { using value_type = float; constexpr uint32_t m = 88; constexpr uint32_t n = 36; constexpr uint32_t k = 24; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>, Layout<Shape<_4, _32, _1>> >; test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication3) { using value_type = float; constexpr uint32_t m = 67; constexpr uint32_t n = 13; constexpr uint32_t k = 11; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<UniversalFMA<value_type, value_type, value_type, 
value_type>>, Layout<Shape<_1, _128, _1>> >; test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm2_DoubleFMA) { using value_type = double; constexpr uint32_t m = 16; constexpr uint32_t n = 32; constexpr uint32_t k = 32; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>, Layout<Shape<_16, _8, _1>> >; test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm3_Float_FMA_CustomPermutationMNK) { using value_type = float; constexpr uint32_t m = 32; constexpr uint32_t n = 32; constexpr uint32_t k = 32; constexpr uint32_t thread_block_size = 256; using tiled_mma_t = TiledMMA< MMA_Atom< UniversalFMA<value_type, value_type, value_type, value_type> >, Layout< Shape<_16, _16, _1> >, Tile< Layout< Shape<_16,_2>, Stride<_2,_1> >, // 32x32x1 MMA with perm for load vectorization Layout< Shape<_16,_2>, Stride<_2,_1> >, Underscore > >; test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm4_Half_MMA) { using value_type = cutlass::half_t; constexpr uint32_t m = 32; constexpr uint32_t n = 32; constexpr uint32_t k = 32; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>, Layout<Shape<_4, _4, _1>> >; using smem_a_atom_layout_t = typename tiled_mma_t::AtomLayoutB_TV; using smem_b_atom_layout_t = typename tiled_mma_t::AtomLayoutA_TV; using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<n> {}))); test_cooperative_gemm_col_major_layout<smem_a_atom_layout_t, smem_b_atom_layout_t, smem_c_atom_layout_t, m, n, k, thread_block_size, tiled_mma_t, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm5_Half_MMA) { using value_type = cutlass::half_t; constexpr uint32_t m = 32; constexpr uint32_t n = 32; constexpr uint32_t k = 32; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>, Layout<Shape<_4, _4, _1>> >; using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{}))); using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{})); using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}))); using smem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{}))); using smem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{})); using smem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}))); test_cooperative_gemm<gmem_a_layout_t, gmem_b_layout_t, gmem_c_layout_t, smem_a_layout_t, smem_b_layout_t, smem_c_layout_t, AutoVectorizingCopyWithAssumedAlignment<128>, // A AutoVectorizingCopyWithAssumedAlignment<128>, // B AutoVectorizingCopyWithAssumedAlignment<128>, // C thread_block_size, tiled_mma_t, 128, value_type, value_type, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm5_Half_MMA_Predicated) { using value_type = cutlass::half_t; constexpr uint32_t m = 31; constexpr uint32_t n = 27; constexpr uint32_t k = 17; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>, Layout<Shape<_4, _4, _1>> >; using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{}))); using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{})); using 
gmem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}))); using smem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{}))); using smem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{})); using smem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}))); test_cooperative_gemm<gmem_a_layout_t, gmem_b_layout_t, gmem_c_layout_t, smem_a_layout_t, smem_b_layout_t, smem_c_layout_t, AutoVectorizingCopyWithAssumedAlignment<16>, // A AutoVectorizingCopyWithAssumedAlignment<16>, // B AutoVectorizingCopyWithAssumedAlignment<16>, // C thread_block_size, tiled_mma_t, 16, value_type, value_type, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm6_Half_MAA_SwizzledSmemLayouts) { using value_type = cutlass::half_t; constexpr uint32_t m = 128; constexpr uint32_t n = 128; constexpr uint32_t k = 64; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>, Layout<Shape<_4, _4, _1>> >; using smem_a_atom_layout_t = decltype( composition(Swizzle<3,3,3>{}, Layout<Shape < _8,_64>, Stride<_64, _1>>{})); using smem_b_atom_layout_t = decltype( composition(Swizzle<3,3,3>{}, Layout<Shape <_64, _8>, Stride< _1,_64>>{})); using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{})); using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<k> {}), GenRowMajor{})); using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n> {}, Int<k> {}), GenColMajor{})); using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<n> {}), GenRowMajor{})); using smem_a_atom_layout_t = smem_a_atom_layout_t; using smem_a_layout_t = decltype(tile_to_shape( smem_a_atom_layout_t{}, make_shape(shape<0>(gmem_a_layout_t{}), shape<1>(gmem_a_layout_t{}))) ); // Transposed using smem_b_atom_layout_t = smem_b_atom_layout_t; using smem_b_layout_t = decltype(tile_to_shape( smem_b_atom_layout_t{}, make_shape(shape<0>(gmem_b_layout_t{}), shape<1>(gmem_b_layout_t{}))) ); using smem_c_atom_layout_t = smem_c_atom_layout_t; using smem_c_layout_t = decltype(tile_to_shape( smem_c_atom_layout_t{}, make_shape(shape<0>(gmem_c_layout_t{}), shape<1>(gmem_c_layout_t{}))) ); test_cooperative_gemm<gmem_a_layout_t, gmem_b_layout_t, gmem_c_layout_t, smem_a_layout_t, smem_b_layout_t, smem_c_layout_t, AutoVectorizingCopyWithAssumedAlignment<128>, // A AutoVectorizingCopyWithAssumedAlignment<128>, // B AutoVectorizingCopyWithAssumedAlignment<128>, // C thread_block_size, tiled_mma_t, 128, value_type, value_type, value_type>(); } TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformNegate_FMA) { using TA = float; using TB = float; using TC = double; constexpr uint32_t m = 32; constexpr uint32_t n = 32; constexpr uint32_t k = 32; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<UniversalFMA<TC, TA, TB, TC>>, Layout<Shape<_16, _8, _1>> >; auto aload = cute::negate {}; auto bload = cute::negate {}; auto cload = cute::negate {}; auto cstore = cute::negate {}; test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 64, TA, TB, TC>( aload, bload, cload, cstore); } TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformNegate_MMA) { using value_type = cutlass::half_t; constexpr uint32_t m = 32; constexpr uint32_t n = 32; constexpr uint32_t k = 32; constexpr uint32_t thread_block_size = 128; using tiled_mma_t = TiledMMA< MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>, Layout<Shape<_4, _4, _1>> >; auto aload = cute::negate {}; auto bload = cute::negate {}; 
auto cload  = cute::negate {};
  auto cstore = cute::negate {};

  test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(
      aload, bload, cload, cstore);
}

template<class ConstantType>
struct increment_by_x {
  ConstantType x;

  template <class T>
  CUTE_HOST_DEVICE constexpr
  T operator()(const T& arg) const {
    return arg + x;
  }
};

template<class From, class To>
struct convert_to {
  CUTE_HOST_DEVICE constexpr
  To operator()(const From& arg) const {
    return static_cast<To>(arg);
  }
};

TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformCustomOp_FMA) {
  using TA = float;
  using TB = float;
  using TC = double;

  constexpr uint32_t m = 32;
  constexpr uint32_t n = 32;
  constexpr uint32_t k = 32;

  constexpr uint32_t thread_block_size = 128;

  using tiled_mma_t = TiledMMA<
    MMA_Atom<UniversalFMA<TC, TA, TB, TC>>,
    Layout<Shape<_16, _8, _1>>
  >;

  auto aload  = increment_by_x<float>{1.111f};
  auto bload  = convert_to<float, double> {};
  auto cload  = cute::negate {};
  auto cstore = cute::negate {};

  test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 64, TA, TB, TC>(
      aload, bload, cload, cstore);
}
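
// Illustrative sketch, not part of the original test set: the functors above show the
// general pattern for custom element-wise load/store transforms. Any callable with a
// device-callable operator() can be passed for the A/B/C load and C store hooks of the
// cooperative GEMM test harness. "scale_by_x" and the test name below are hypothetical
// and simply mirror the CustomOp test above with a different A-side transform.
template<class ConstantType>
struct scale_by_x {
  ConstantType x;

  template <class T>
  CUTE_HOST_DEVICE constexpr
  T operator()(const T& arg) const {
    return arg * x;
  }
};

TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformCustomOp_FMA_Scale) {
  using TA = float;
  using TB = float;
  using TC = double;

  constexpr uint32_t m = 32;
  constexpr uint32_t n = 32;
  constexpr uint32_t k = 32;

  constexpr uint32_t thread_block_size = 128;

  using tiled_mma_t = TiledMMA<
    MMA_Atom<UniversalFMA<TC, TA, TB, TC>>,
    Layout<Shape<_16, _8, _1>>
  >;

  auto aload  = scale_by_x<float>{0.5f};
  auto bload  = convert_to<float, double> {};
  auto cload  = cute::negate {};
  auto cstore = cute::negate {};

  test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 64, TA, TB, TC>(
      aload, bload, cload, cstore);
}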
cutlass/test/unit/cute/volta/cooperative_gemm.cu/0
{ "file_path": "cutlass/test/unit/cute/volta/cooperative_gemm.cu", "repo_id": "cutlass", "token_count": 6602 }
58
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/activation.h" ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_thread_linear_combination, device_side_f16_f32_value) { using Element = float; using ElementOutput = cutlass::half_t; int const kCount = 8; using LinearCombination = cutlass::epilogue::thread::LinearCombination< ElementOutput, kCount, Element, Element>; Element alpha = Element(2); Element beta = Element(1); typename LinearCombination::Params params(alpha, beta); LinearCombination linear_combination_op(params); cutlass::Array<ElementOutput, kCount> source; cutlass::Array<Element, kCount> accum; for (int i = 0; i < kCount; ++i) { accum[i] = Element(i * 2); source[i] = ElementOutput((i * 7 % 9) - 4); } cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, source); for (int i = 0; i < kCount; ++i) { ElementOutput expected = ElementOutput( alpha * accum[i] + beta * Element(ElementOutput(source[i])) ); ElementOutput got = destination[i]; EXPECT_TRUE(expected == got); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_thread_linear_combination, device_side_f16_f32_ptr) { using Element = float; using ElementOutput = cutlass::half_t; int const kCount = 8; using LinearCombination = cutlass::epilogue::thread::LinearCombination< ElementOutput, kCount, Element, Element>; Element alpha = Element(2); Element beta = Element(1); typename LinearCombination::Params params(&alpha, &beta); LinearCombination linear_combination_op(params); cutlass::Array<ElementOutput, kCount> source; cutlass::Array<Element, kCount> accum; for (int i = 0; i < kCount; ++i) { accum[i] = Element(i * 2); source[i] = ElementOutput((i * 7 % 9) - 4); } cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, source); for (int i = 0; i < kCount; ++i) { ElementOutput expected = ElementOutput( alpha * accum[i] + beta * Element(ElementOutput(source[i])) ); ElementOutput got = destination[i]; EXPECT_TRUE(expected == got); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_thread_linear_combination_gelu, device_side_f16_f16_ptr) { using Element = cutlass::half_t; using ElementOutput = cutlass::half_t; int const kCount = 8; using LinearCombinationGELU = cutlass::epilogue::thread::LinearCombinationGELU< ElementOutput, kCount, Element, Element>; Element alpha = Element(1); Element beta = Element(0); typename LinearCombinationGELU::Params params(&alpha, &beta); LinearCombinationGELU linear_combination_op(params); cutlass::Array<Element, kCount> accum; for (int i = 0; i < kCount; ++i) { accum[i] = Element((float)i * 0.3f); } cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, accum); cutlass::epilogue::thread::GELU<ElementOutput> gelu_func; for (int i = 0; i < kCount; ++i) { ElementOutput expected = gelu_func(accum[i]); ElementOutput got = destination[i]; EXPECT_TRUE(expected == got); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_thread_linear_combination_gelu_taylor, device_side_f16_f16_ptr) { using Element = cutlass::half_t; using ElementOutput = cutlass::half_t; int const kCount = 8; using 
LinearCombinationGELU = cutlass::epilogue::thread::LinearCombinationGELU<
    ElementOutput,
    kCount,
    Element,
    Element>;

  Element alpha = Element(1);
  Element beta = Element(0);

  typename LinearCombinationGELU::Params params(&alpha, &beta);

  LinearCombinationGELU linear_combination_op(params);

  cutlass::Array<Element, kCount> accum;

  for (int i = 0; i < kCount; ++i) {
    accum[i] = Element((float)i * 0.3f);
  }

  cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, accum);

  cutlass::epilogue::thread::GELU<ElementOutput> gelu_func;

  for (int i = 0; i < kCount; ++i) {
    ElementOutput expected = gelu_func(accum[i]);
    ElementOutput got = destination[i];

    EXPECT_TRUE(expected == got);
  }
}

/////////////////////////////////////////////////////////////////////////////////////////////////
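
// Illustrative sketch, not part of the original test set: the same epilogue functor
// exercised with a float output type. It checks the identical contract the half_t tests
// above rely on, destination[i] = alpha * accum[i] + beta * source[i]; the test name is
// hypothetical. Inputs are small integers so the comparison is exact in float.
TEST(Epilogue_thread_linear_combination, device_side_f32_f32_value_sketch) {
  using Element = float;
  using ElementOutput = float;
  int const kCount = 8;

  using LinearCombination = cutlass::epilogue::thread::LinearCombination<
    ElementOutput,
    kCount,
    Element,
    Element>;

  Element alpha = Element(2);
  Element beta = Element(1);

  typename LinearCombination::Params params(alpha, beta);
  LinearCombination linear_combination_op(params);

  cutlass::Array<ElementOutput, kCount> source;
  cutlass::Array<Element, kCount> accum;

  for (int i = 0; i < kCount; ++i) {
    accum[i] = Element(i * 2);
    source[i] = ElementOutput((i * 7 % 9) - 4);
  }

  cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, source);

  for (int i = 0; i < kCount; ++i) {
    ElementOutput expected = alpha * accum[i] + beta * source[i];
    ElementOutput got = destination[i];

    EXPECT_TRUE(expected == got);
  }
}

/////////////////////////////////////////////////////////////////////////////////////////////////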
cutlass/test/unit/epilogue/thread/linear_combination.cu/0
{ "file_path": "cutlass/test/unit/epilogue/thread/linear_combination.cu", "repo_id": "cutlass", "token_count": 2037 }
59
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide GEMM interface */ #include <iostream> #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cute/atom/mma_atom.hpp" #include "cutlass/numeric_types.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/default_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "../../common/cutlass_unit_test.h" #include "gemm_testbed_3x.hpp" #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) using namespace cute; /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_1,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecialized >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32n_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_1,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecialized >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } 
/////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32t_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_1,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecialized >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::gemm::EpilogueTransposed >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32n_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_1,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecialized >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_4,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, 
cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecialized >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32n_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_4,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecialized >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32t_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_4,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecialized >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::gemm::EpilogueTransposed >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } 
/////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32n_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_4,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecialized >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// //////////// CollectiveBuilder with KernelScheduleAuto ////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1_auto_schedule) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_4,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32n_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1_auto_schedule) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_4,_2,_1>; using CollectiveMainloop = typename 
cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32t_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1_auto_schedule) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_4,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::gemm::EpilogueTransposed >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_tf32n_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1_auto_schedule) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_64,_128,_32>; using ClusterShape_MNK = Shape<_4,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 4, ElementB, LayoutB, 4, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< 
Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>()); } /////////////////////////////////////////////////////////////////////////////// #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
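
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)

// Illustrative sketch, not part of the original test list: the same CollectiveBuilder
// composition used by the tests above, written out once with comments on what each
// template parameter controls. The namespace name "builder_sketch" and the alias names
// are hypothetical and exist only to keep this annotated example separate.
namespace builder_sketch {

using SketchMainloop = cutlass::gemm::collective::CollectiveBuilder<
    cutlass::arch::Sm90,                                    // target architecture
    cutlass::arch::OpClassTensorOp,                         // tensor core MMA
    cutlass::tfloat32_t, cutlass::layout::RowMajor,    4,   // A: type, layout, alignment (in elements)
    cutlass::tfloat32_t, cutlass::layout::ColumnMajor, 4,   // B: type, layout, alignment (in elements)
    float,                                                  // accumulator type
    Shape<_64,_128,_32>,                                    // CTA tile shape (M, N, K)
    Shape<_1,_1,_1>,                                        // thread block cluster shape
    cutlass::gemm::collective::StageCountAuto,              // let the builder choose the stage count
    cutlass::gemm::KernelTmaWarpSpecialized                 // TMA-based warp-specialized mainloop
  >::CollectiveOp;

using SketchEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
    cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
    Shape<_64,_128,_32>, Shape<_1,_1,_1>,
    cutlass::epilogue::collective::EpilogueTileAuto,        // let the builder pick the epilogue tile
    float, float,                                           // accumulator and compute types
    float, cutlass::layout::ColumnMajor, 4,                 // C: type, layout, alignment
    float, cutlass::layout::ColumnMajor, 4,                 // D: type, layout, alignment
    cutlass::epilogue::collective::EpilogueScheduleAuto     // let the builder pick the schedule
  >::CollectiveOp;

// The kernel and device-level adapter are then assembled exactly as in the tests above:
//   using Kernel = cutlass::gemm::kernel::GemmUniversal<
//       Shape<int,int,int,int>, SketchMainloop, SketchEpilogue>;
//   using Gemm   = cutlass::gemm::device::GemmUniversalAdapter<Kernel>;

} // namespace builder_sketch

#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)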
cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32_gmma_rs_cluster_warpspecialized.cu/0
{ "file_path": "cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32_gmma_rs_cluster_warpspecialized.cu", "repo_id": "cutlass", "token_count": 8066 }
60
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide GEMM interface */ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/gemm_complex.h" #include "testbed_utils.h" namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Gemm> struct GemmWithBroadcastReferenceOp { using OutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; using ElementCompute = typename OutputOp::ElementCompute; using ElementZ = typename OutputOp::ElementZ; using ElementT = typename OutputOp::ElementT; typename OutputOp::BinaryOp binary_op; typename OutputOp::ElementwiseOp elementwise_op; GemmWithBroadcastReferenceOp() { } void operator()(ElementZ &Z, ElementT &T, ElementCompute gemm, ElementCompute bias) { ElementCompute t_full = binary_op(gemm, bias); if (OutputOp::kStoreT) { T = ElementT(t_full); } if (OutputOp::kStoreZ) { ElementCompute z_full = elementwise_op(t_full); Z = ElementZ(z_full); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Fused testbed // // Y = GEMM(AB, C) // // T[i, j] = BinaryOp(Y[i, j], Broadcast[i]) // // Z[i, j] = Elementwise(T[i, j]) // template < typename Gemm, typename ReferenceOp = GemmWithBroadcastReferenceOp<Gemm> > struct TestbedGemmWithBroadcast { using ElementA = typename Gemm::ElementA; using ElementB = typename Gemm::ElementB; using OutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; using ElementC = typename Gemm::ElementC; using ElementAccumulator = typename Gemm::ElementAccumulator; using ElementCompute = typename OutputOp::ElementCompute; using ElementVector = typename OutputOp::ElementVector; using ElementZ = typename OutputOp::ElementZ; using ElementT = typename OutputOp::ElementT; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A; // Input A cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B; // Input B cutlass::HostTensor<ElementC, typename Gemm::LayoutC> tensor_C; // Input C cutlass::HostTensor<ElementVector, typename Gemm::LayoutC> tensor_Broadcast; // Input Broadcast cutlass::HostTensor<ElementZ, typename Gemm::LayoutC> tensor_Z; cutlass::HostTensor<ElementT, typename Gemm::LayoutC> tensor_T; cutlass::HostTensor<ElementAccumulator, typename Gemm::LayoutC> tensor_C_ref; cutlass::HostTensor<ElementAccumulator, typename Gemm::LayoutC> tensor_Y_ref; cutlass::HostTensor<ElementZ, typename Gemm::LayoutC> tensor_Z_ref; cutlass::HostTensor<ElementT, typename Gemm::LayoutC> tensor_T_ref; // // Methods // TestbedGemmWithBroadcast( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a 
tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { EXPECT_TRUE(false) << "Not implemented"; return false; } return true; } /// Initializes data structures void initialize(cutlass::gemm::GemmCoord problem_size) { // // Allocate the GEMM workspace // tensor_A.resize(problem_size.mk()); tensor_B.resize(problem_size.kn()); tensor_C.resize(problem_size.mn()); tensor_Z.resize(problem_size.mn()); tensor_T.resize(problem_size.mn()); tensor_Broadcast.resize({ problem_size.m(), 1 }); tensor_C_ref.resize(problem_size.mn()); tensor_Y_ref.resize(problem_size.mn()); tensor_Z_ref.resize(problem_size.mn()); tensor_T_ref.resize(problem_size.mn()); EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019)); EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018)); EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017)); EXPECT_TRUE(initialize_tensor(tensor_Broadcast.host_view(), init_C, seed + 2020)); // It is possible to randomly initialize to all zeros, so override this with non-zeros // in the upper left corner of each operand. 
tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1); tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1); tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1); for (int m = 0; m < tensor_C_ref.extent().row(); ++m) { for (int n = 0; n < tensor_C_ref.extent().column(); ++n) { tensor_C_ref.at({m, n}) = ElementAccumulator(tensor_C.at({m, n})); } } tensor_A.sync_device(); tensor_B.sync_device(); tensor_C.sync_device(); tensor_Broadcast.sync_device(); tensor_Z.sync_device(); tensor_T.sync_device(); } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference( cutlass::gemm::GemmCoord problem_size, ElementAccumulator alpha, ElementAccumulator beta) { tensor_Z.sync_host(); tensor_T.sync_host(); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); if (OutputOp::kStoreZ) { EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Z.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Z_ref.host_view()), 0); } if (OutputOp::kStoreT) { EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_T.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_T_ref.host_view()), 0); } bool passed = true; float norm_diff = 0; if (OutputOp::kStoreZ) { norm_diff = cutlass::reference::host::TensorNormDiff(tensor_Z_ref.host_view(), tensor_Z.host_view(), float()); passed = (norm_diff <= 0.1f); EXPECT_LT(norm_diff, 0.1f) << " tensor_Z is incorrect"; } if (OutputOp::kStoreT) { norm_diff = cutlass::reference::host::TensorNormDiff(tensor_T_ref.host_view(), tensor_T.host_view(), float()); passed = (passed && (norm_diff <= 0.1f)); EXPECT_LT(norm_diff, 0.1f) << " tensor_T is incorrect"; } if (!passed) { /* std::stringstream fname; fname << "error_Gemm_device_" << problem_size.m() << "x" << problem_size.n() << "x" << problem_size.k() << "_" << Gemm::ThreadblockShape::kM << "x" << Gemm::ThreadblockShape::kN << "x" << Gemm::ThreadblockShape::kK << "_" << Gemm::WarpShape::kM << "x" << Gemm::WarpShape::kN << "x" << Gemm::WarpShape::kK << ".txt"; std::ofstream file(fname.str()); */ std::ofstream file("errors_testbed_gemm_with_broadcast.txt"); file << "problem: " << problem_size << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; file << "A =\n" << tensor_A.host_view() << "\nB =\n" << tensor_B.host_view() << "\nC =\n" << tensor_C.host_view() << "\nZ =\n" << tensor_Z.host_view() << "\nT =\n" << tensor_T.host_view() << "\n\n" << "\nY_ref =\n" << tensor_Y_ref.host_view() << "\nZ_ref =\n" << tensor_Z_ref.host_view() << "\nT_ref =\n" << tensor_T_ref.host_view(); } return passed; } /// Verifies the result is a GEMM bool verify( cutlass::gemm::GemmCoord problem_size, ElementAccumulator alpha, ElementAccumulator beta) { // // Verify // cutlass::reference::host::GemmComplex< typename Gemm::ElementA, typename Gemm::LayoutA, typename Gemm::ElementB, typename Gemm::LayoutB, ElementAccumulator, typename Gemm::LayoutC, ElementAccumulator, ElementAccumulator >( problem_size, alpha, tensor_A.host_ref(), Gemm::kTransformA, tensor_B.host_ref(), Gemm::kTransformB, beta, tensor_C_ref.host_ref(), tensor_Y_ref.host_ref(), ElementAccumulator(0) ); using ElementC = typename Gemm::ElementC; ReferenceOp reference_op; // compute tensor Z and tensor T for (int m = 0; m < problem_size.m(); ++m) { for (int n = 0; n < problem_size.n(); ++n) { ElementZ z; ElementT t; reference_op(z, t, 
tensor_Y_ref.at({m, n}), tensor_Broadcast.at({m, 0})); if (OutputOp::kStoreZ) { tensor_Z_ref.at({m, n}) = z; } if (OutputOp::kStoreT) { tensor_T_ref.at({m, n}) = t; } } } return compare_reference(problem_size, alpha, beta); } /// Returns true if the CUDA device is sufficient to execute the kernel. bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerBlockOptin < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmUniversalMode mode, cutlass::gemm::GemmCoord problem_size, int batch_count = 1, ElementAccumulator alpha = ElementAccumulator(1), ElementAccumulator beta = ElementAccumulator(0)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." << std::endl; } return true; } this->initialize(problem_size); // // Initialize the GEMM operator // typename Gemm::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, tensor_A.device_data(), tensor_B.device_data(), tensor_C.device_data(), tensor_Z.device_data(), tensor_Broadcast.device_data(), tensor_T.device_data(), problem_size.m() * problem_size.k(), problem_size.n() * problem_size.k(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), problem_size.m(), problem_size.m() * problem_size.n(), tensor_A.layout().stride(0), tensor_B.layout().stride(0), tensor_C.layout().stride(0), tensor_Z.layout().stride(0), 0, // This must be zero tensor_T.layout().stride(0), }; Gemm gemm_op; size_t workspace_size = Gemm::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Run the GEMM // status = gemm_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Verify // bool passed = true; passed = this->verify(problem_size, alpha, beta); if (!passed) { std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl; } // // Profile // #if 0 // profiling disabled for now. 
int const kWorkspaces = 100; cutlass::DeviceAllocation<typename Gemm::ElementA> profiling_tensor_A(tensor_A.capacity() * kWorkspaces); cutlass::DeviceAllocation<typename Gemm::ElementB> profiling_tensor_B(tensor_B.capacity() * kWorkspaces); cutlass::DeviceAllocation<ElementC> profiling_tensor_C(tensor_C.capacity() * kWorkspaces); cutlass::DeviceAllocation<ElementC> profiling_tensor_Broadcast(tensor_Broadcast.capacity() * kWorkspaces); cutlass::DeviceAllocation<ElementZ> profiling_tensor_Z(tensor_Z.capacity() * kWorkspaces); cutlass::DeviceAllocation<ElementT> profiling_tensor_T(tensor_T.capacity() * kWorkspaces); cudaEvent_t events[2]; for (auto & event : events) { cudaError_t result = cudaEventCreate(&event); if (result != cudaSuccess) { EXPECT_EQ(result, cudaSuccess) << " cudaEventCreate() failed with error " << cudaGetErrorString(result); return false; break; } } int const kWarmupIterations = 5; int const kProfilingIterations = 100; for (int i = 0; i < kWarmupIterations; ++i) { status = gemm_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); } cudaError_t result = cudaEventRecord(events[0]); EXPECT_EQ(result, cudaSuccess); for (int i = 0; i < kProfilingIterations; ++i) { typename Gemm::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, profiling_tensor_A.get() + tensor_A.capacity() * (i % kWorkspaces), profiling_tensor_B.get() + tensor_B.capacity() * (i % kWorkspaces), profiling_tensor_C.get() + tensor_C.capacity() * (i % kWorkspaces), profiling_tensor_Z.get() + tensor_Z.capacity() * (i % kWorkspaces), profiling_tensor_Broadcast.get() + tensor_Broadcast.capacity() * (i % kWorkspaces), profiling_tensor_T.get() + tensor_T.capacity() * (i % kWorkspaces), problem_size.m() * problem_size.k(), problem_size.n() * problem_size.k(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), problem_size.m(), problem_size.m() * problem_size.n(), tensor_A.layout().stride(0), tensor_B.layout().stride(0), tensor_C.layout().stride(0), tensor_Z.layout().stride(0), 0, // This must be zero tensor_T.layout().stride(0), }; gemm_op.initialize(arguments, workspace.get()); status = gemm_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); } result = cudaEventRecord(events[1]); EXPECT_EQ(result, cudaSuccess); result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess); float elapsed_time = 0; result = cudaEventElapsedTime(&elapsed_time, events[0], events[1]); EXPECT_EQ(result, cudaSuccess); double average_time = double(elapsed_time) / double(kProfilingIterations); std::cout << problem_size << ": " << average_time << " ms" << std::endl; for (auto & event : events) { cudaEventDestroy(event); } #endif return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Gemm, typename ReferenceOp = GemmWithBroadcastReferenceOp<Gemm> > bool TestGemmWithBroadcast( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmUniversalMode mode, int batch_count, double alpha = 1.0, double beta = 2.0) { bool passed = true; TestbedGemmWithBroadcast<Gemm, ReferenceOp> testbed; using ElementAccumulator = typename Gemm::ElementAccumulator; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementAccumulator>(alpha), cutlass::from_real<ElementAccumulator>(beta) ); return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Gemm, typename ReferenceOp = 
GemmWithBroadcastReferenceOp<Gemm> > bool TestAllGemmWithBroadcast() { int M_problems[] = {8, 136, 264, 520}; int N_problems[] = {8, 136, 264, 520}; int K_problems[] = {8, 136, 264, 520}; double alpha_problems[] = {1.25, 2.25}; double beta_problems[] = {0, 1, 2.0}; bool passed = true; for (int M : M_problems) { for (int N : N_problems) { for (int K : K_problems) { for (double alpha : alpha_problems) { for (double beta : beta_problems) { TestbedGemmWithBroadcast<Gemm, ReferenceOp> testbed; using ElementAccumulator = typename Gemm::ElementAccumulator; passed = testbed.run( cutlass::gemm::GemmUniversalMode::kGemm, {M, N, K}, 1, cutlass::from_real<ElementAccumulator>(alpha), cutlass::from_real<ElementAccumulator>(beta) ); EXPECT_TRUE(passed) << "M: " << M << ", N: " << N << ", K: " << K << ", alpha: " << alpha << ", beta: " << beta; if (!passed) { return passed; } } } } } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace test /////////////////////////////////////////////////////////////////////////////////////////////////
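
// Usage sketch (illustrative, not part of this header): a device-side test translation
// unit is expected to define a concrete Gemm type whose epilogue is a "GEMM with
// broadcast" output op (kStoreZ / kStoreT), then drive it through the helpers above.
// "MyGemmWithBroadcast" below is a hypothetical placeholder for such a type, so the
// example is left commented out.
//
// TEST(SM80_Device_GemmWithBroadcast_example, 128x128x32) {
//   using Gemm = MyGemmWithBroadcast;   // hypothetical, defined elsewhere
//
//   // Single problem size with explicit mode, batch count, alpha, and beta:
//   EXPECT_TRUE((test::gemm::device::TestGemmWithBroadcast<Gemm>(
//       {520, 264, 136}, cutlass::gemm::GemmUniversalMode::kGemm,
//       /*batch_count=*/1, /*alpha=*/1.5, /*beta=*/2.0)));
//
//   // Or sweep the built-in problem-size / alpha / beta grid:
//   EXPECT_TRUE(test::gemm::device::TestAllGemmWithBroadcast<Gemm>());
// }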
cutlass/test/unit/gemm/device/testbed_gemm_with_broadcast.h/0
{ "file_path": "cutlass/test/unit/gemm/device/testbed_gemm_with_broadcast.h", "repo_id": "cutlass", "token_count": 8380 }
61
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for threadblock level GEMV */ #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/numeric_types.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/tensor_ref.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/gemm/threadblock/gemv.h" #include "cutlass/gemm/threadblock/default_gemv_core.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Gemv, typename LongIndex, typename RefA, typename RefB, typename RefC> __global__ void batched_gemv_threadblock_test_kernel( cutlass::gemm::GemmCoord problem_size, LongIndex stride_a, LongIndex stride_b, LongIndex stride_c, RefA ref_A, RefB ref_B, RefC ref_C ) { typename Gemv::IteratorA::TensorCoord threadblock_offset_A(0, 0); typename Gemv::IteratorB::TensorCoord threadblock_offset_B(0, 0); typename Gemv::IteratorB::TensorCoord threadblock_offset_C(0, 0); // Move to the right batches for these threads ref_A.add_pointer_offset(threadIdx.y * stride_a); ref_B.add_pointer_offset(threadIdx.y * stride_b); ref_C.add_pointer_offset(threadIdx.y * stride_c); // Construct iterators to A and B operands typename Gemv::IteratorA::Params params_A(ref_A.layout()); typename Gemv::IteratorA iterator_A(params_A, ref_A.data(), { problem_size.m(), problem_size.k() }, 0, threadblock_offset_A); typename Gemv::IteratorB::Params params_B(ref_B.layout()); typename Gemv::IteratorB iterator_B(params_B, ref_B.data(), { problem_size.k(), problem_size.n() }, threadIdx.x, threadblock_offset_B); Gemv gemv; typename Gemv::FragmentC accum; accum.clear(); // Compute threadblock-scoped matrix multiply-add gemv(problem_size, accum, iterator_A, iterator_B, accum); // IteratorC is PitchLinear<> assumes n() contiguous typename Gemv::IteratorC::Params params_C(ref_C.layout()); typename Gemv::IteratorC iterator_C(params_C, ref_C.data(), { problem_size.m(), problem_size.n() }, threadIdx.x, threadblock_offset_C); iterator_C.store(accum); } ///////////////////////////////////////////////////////////////////////////////////////////////// template<typename Shape_, typename ElementAB_, typename ElementC_, typename LayoutA_, typename LayoutB_, typename LayoutC_, int THREAD_N, int THREAD_K, int MAX_THREADS_PER_BLOCK=512, bool DEBUG=false> void batched_gemv_threadblock_test(cutlass::gemm::GemmCoord problem_size, int num_batch) { using Shape = Shape_; using ElementA = ElementAB_; using LayoutA = LayoutA_; using ElementB = ElementAB_; using LayoutB = LayoutB_; using ElementC = ElementC_; using LayoutC = LayoutC_; using ThreadShape = cutlass::gemm::GemmShape<1, THREAD_N, THREAD_K>; using Core = typename cutlass::gemm::threadblock::DefaultGemvCore< Shape, ThreadShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC >; if (DEBUG) { num_batch = 1; } using Mma = cutlass::gemm::threadblock::Gemv<Core>; // Create host tensors that will be the backing store for the batches // Note that no device memory is initially allocated cutlass::HostTensor<ElementA, LayoutA> matrix_A({problem_size.m(), problem_size.k()}, false); cutlass::HostTensor<ElementB, LayoutB> 
matrix_B({problem_size.k(), problem_size.n()}, false);
  cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed({problem_size.m(), problem_size.n()}, false);
  cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference({problem_size.m(), problem_size.n()}, false);

  // Reserve memory for the batch of tensors
  matrix_A.reserve(problem_size.m()*problem_size.k()*num_batch);
  matrix_B.reserve(problem_size.n()*problem_size.k()*num_batch);
  matrix_C_computed.reserve(problem_size.m()*problem_size.n()*num_batch);
  matrix_C_reference.reserve(problem_size.m()*problem_size.n()*num_batch, false);

  // Fill each tensor batch
  const int seed = 6834;
  for (int b = 0; b < num_batch; b++) {
    if(DEBUG) {
      cutlass::reference::host::BlockFillSequential(
        matrix_A.host_data_ptr_offset(b*matrix_A.capacity()), matrix_A.capacity());
      cutlass::reference::host::BlockFillSequential(
        matrix_B.host_data_ptr_offset(b*matrix_B.capacity()), matrix_B.capacity());
    } else {
      cutlass::reference::host::TensorFillRandomUniform(
        matrix_A.host_view(b*matrix_A.capacity()),
        seed + 1660,
        8,
        -8,
        0
      );
      cutlass::reference::host::TensorFillRandomUniform(
        matrix_B.host_view(b*matrix_B.capacity()),
        seed + 1880,
        8,
        -8,
        0
      );
    }

    cutlass::reference::host::TensorFill(matrix_C_computed.host_view(b*matrix_C_computed.capacity()));
    cutlass::reference::host::TensorFill(matrix_C_reference.host_view(b*matrix_C_reference.capacity()));
  }

  matrix_A.sync_device();
  matrix_B.sync_device();
  matrix_C_computed.sync_device();

  dim3 grid(1, 1); // only 1 CTA is used
  dim3 block(Shape::kN / THREAD_N, num_batch, 1);

#if 0
  printf("block dim = %d x %d\n", block.x, block.y);
#endif

  // Some sanity checks
  EXPECT_TRUE( problem_size.n() % THREAD_N == 0 );
  EXPECT_TRUE( block.x*block.y <= MAX_THREADS_PER_BLOCK );

  test::gemm::threadblock::batched_gemv_threadblock_test_kernel<Mma><<< grid, block >>>(
    problem_size,
    matrix_A.capacity(),
    matrix_B.capacity(),
    matrix_C_computed.capacity(),
    matrix_A.device_ref(),
    matrix_B.device_ref(),
    matrix_C_computed.device_ref()
  );

  cudaError_t result = cudaDeviceSynchronize();
  EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result);

  matrix_C_computed.sync_host();

  // Compute the batched gemms
  for (int b = 0; b < num_batch; b++) {
    cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
                                   ElementC, LayoutC, ElementC, ElementC>
        reference_gemm;

    reference_gemm(
      problem_size.mnk(),
      ElementC(1),
      matrix_A.host_ref(b*matrix_A.capacity()),
      matrix_B.host_ref(b*matrix_B.capacity()),
      ElementC(0),
      matrix_C_reference.host_ref(b*matrix_C_computed.capacity())
    );

    bool passed = cutlass::reference::host::TensorEquals(
      matrix_C_computed.host_view(b*matrix_C_computed.capacity()),
      matrix_C_reference.host_view(b*matrix_C_reference.capacity()));

    EXPECT_TRUE(passed)
      //<< "A:\n" << matrix_A.host_view() << "\n"
      //<< "B:\n" << matrix_B.host_view() << "\n"
      << "Batch: " << b << "\n"
      << "Reference:\n" << matrix_C_reference.host_view(b*matrix_C_reference.capacity()) << "\n"
      << "Computed:\n" << matrix_C_computed.host_view(b*matrix_C_computed.capacity()) << "\n";
  }
}

} // namespace threadblock
} // namespace gemm
} // namespace test

/////////////////////////////////////////////////////////////////////////////////////////////////

// A: ColumnMajor
// B: RowMajor
// C: ColumnMajor

TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp32_fp32_2N_2K) {
  using namespace test::gemm::threadblock;

  cutlass::gemm::GemmCoord problem_size(1, 64, 64);
  const int num_batch = 4;
  const int THREAD_N = 2;
  const int THREAD_K = 2;

  using Shape = cutlass::gemm::GemmShape<1, 64,
THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 5x1x128x128_crc_fp32_fp32_4N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 128, 128); const int num_batch = 5; const int THREAD_N = 4; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_fp32_fp32_1N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp16_fp32_2N_2K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 2; using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp16_fp32_2N_8K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 8; using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_fp16_fp32_1N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_i8_i32_2N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; batched_gemv_threadblock_test<Shape, int8_t, int32_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_i8_i32_1N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, int8_t, int32_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, THREAD_N, 
THREAD_K>(problem_size, num_batch); } // A: RowMajor // B: ColumnMajor // C: RowMajor TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp32_fp32_2N_2K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 2; using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 5x1x128x128_rcr_fp32_fp32_4N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 128, 128); const int num_batch = 5; const int THREAD_N = 4; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_fp32_fp32_1N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp16_fp32_2N_2K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 2; using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp16_fp32_2N_8K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 8; using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_fp16_fp32_1N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_i8_i32_2N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; batched_gemv_threadblock_test<Shape, int8_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_i8_i32_1N_4K) { using namespace test::gemm::threadblock; 
cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, int8_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } // A: RowMajor // B: ColumnMajor // C: ColumnMajor TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp32_fp32_2N_2K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 2; using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 5x1x128x128_rcc_fp32_fp32_4N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 128, 128); const int num_batch = 5; const int THREAD_N = 4; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_fp32_fp32_1N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp16_fp32_2N_2K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 2; using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp16_fp32_2N_8K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 8; using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_fp16_fp32_1N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, cutlass::half_t, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_i8_i32_2N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 64, 64); const int num_batch = 4; const int THREAD_N = 2; const int THREAD_K = 4; using Shape 
= cutlass::gemm::GemmShape<1, 128, THREAD_K>; batched_gemv_threadblock_test<Shape, int8_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); } TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_i8_i32_1N_4K) { using namespace test::gemm::threadblock; cutlass::gemm::GemmCoord problem_size(1, 17, 64); const int num_batch = 16; const int THREAD_N = 1; const int THREAD_K = 4; using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>; batched_gemv_threadblock_test<Shape, int8_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, THREAD_N, THREAD_K>(problem_size, num_batch); }
cutlass/test/unit/gemm/threadblock/batched_gemv.cu/0
{ "file_path": "cutlass/test/unit/gemm/threadblock/batched_gemv.cu", "repo_id": "cutlass", "token_count": 11925 }
62
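/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch only, not part of batched_gemv.cu above: a plain host reference for the
// computation those tests verify. Each batch computes C (1 x N) = A (1 x K) * B (K x N); with
// M == 1 the column-major A and C collapse to contiguous K- and N-element vectors, and the
// row-major B slice is indexed as B[k * N + n]. The function name and flat-pointer interface
// are hypothetical helpers for exposition, not CUTLASS APIs.
template <typename ElementAB, typename ElementC>
void batched_gemv_reference_sketch(
    int num_batch, int N, int K,
    ElementAB const* A,   // num_batch contiguous slices of K elements each
    ElementAB const* B,   // num_batch contiguous slices of K*N elements each (row-major)
    ElementC* C) {        // num_batch contiguous slices of N elements each
  for (int b = 0; b < num_batch; ++b) {
    for (int n = 0; n < N; ++n) {
      ElementC acc = ElementC(0);
      for (int k = 0; k < K; ++k) {
        acc += ElementC(A[b * K + k]) * ElementC(B[b * K * N + k * N + n]);
      }
      C[b * N + n] = acc;
    }
  }
}
/////////////////////////////////////////////////////////////////////////////////////////////////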
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "cutlass/arch/wmma.h" #ifdef CUTLASS_ARCH_WMMA_SM75_ENABLED #include "mma_pipelined_testbed.h" #include "cutlass/gemm/threadblock/default_mma_core_wmma.h" /// All tests use double-buffered (kStages=2) mma pipeline for the gemm mainloop /// Test name format: SM[arch]_gemm_threadblock_wmma_tensor_op_[alayout]_[blayout]_[clayout]_[atype].[threadblock_shape]_[warp_shape]_[instruction_shape] ///////////////////////////////////////////////////////////////////////// /// Integer (s8 and u8) WMMA threadblock level tests ///// ///////////////////////////////////////////////////////////////////////// #if defined(CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED) TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s8, 64x64x32_64x64x32_16x16x16) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 2; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; float alpha = 1.f; float beta = 0.0f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s8, 64x64x64_64x64x64_16x16x16) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 2; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; float alpha = 1.f; float beta = 0.0f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_wmma_tensor_op_col_row_row_s8, 64x64x32_64x64x32_16x16x16) { using ElementA = int8_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 2; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; float alpha = 1.f; float beta = 0.0f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, 
ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_wmma_tensor_op_col_row_row_s8, 64x64x64_64x64x64_16x16x16) { using ElementA = int8_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 2; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; float alpha = 1.f; float beta = 0.0f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } #endif //CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED //////////////////////////////////////////////////////////////////////// /// SUBBYTE (s4 and b1) WMMA threadblock level tests //// /////////////////////////////////////////////////////////////////////// #if defined(CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED) TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s4, 64x64x128_64x64x128_8x8x32) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 2; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; float alpha = 1.f; float beta = 0.f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_col_s4, 64x64x64_64x64x64_8x8x32) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; static const int kStages = 2; cutlass::gemm::GemmCoord problem_size(64, 64, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; float alpha = 1.f; float beta = 0.f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); 
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_b1, 64x64x512_64x64x512_8x8x128) { using ElementA = cutlass::uint1b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::uint1b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 2; cutlass::gemm::GemmCoord problem_size(64, 64, 2048); using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; float alpha = 1.f; float beta = 0.f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages, cutlass::arch::OpXorPopc>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_col_b1, 64x64x512_64x64x512_8x8x128) { using ElementA = cutlass::uint1b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::uint1b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; static const int kStages = 2; cutlass::gemm::GemmCoord problem_size(64, 64, 2048); using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; float alpha = 1.f; float beta = 0.f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages, cutlass::arch::OpXorPopc>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } #endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED #endif //CUTLASS_ARCH_WMMA_SM75_ENABLED
cutlass/test/unit/gemm/threadblock/mma_pipelined_wmma_sm75.cu/0
{ "file_path": "cutlass/test/unit/gemm/threadblock/mma_pipelined_wmma_sm75.cu", "repo_id": "cutlass", "token_count": 4841 }
63
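/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch only, not part of mma_pipelined_wmma_sm75.cu above: the launch shape used
// by those testbeds follows directly from the tile shapes. Every test pairs a warp shape equal
// to its threadblock shape, so exactly one 32-thread warp is launched (block(32, 1, 1)). The
// helper below is a hedged restatement of that relationship; it assumes only that the shapes
// are cutlass::gemm::GemmShape instantiations, and its name is hypothetical.
template <typename ThreadblockShape, typename WarpShape>
constexpr int wmma_testbed_thread_count() {
  // 32 threads per warp; one warp per (M, N, K) warp tile within the threadblock tile.
  return 32 * (ThreadblockShape::kM / WarpShape::kM)
            * (ThreadblockShape::kN / WarpShape::kN)
            * (ThreadblockShape::kK / WarpShape::kK);
}
/////////////////////////////////////////////////////////////////////////////////////////////////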
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Tests for TensorReduce family of device-wide operators */ #include <iostream> #include <limits> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/reduction/device/tensor_reduce.h" #include "cutlass/functional.h" #include "cutlass/layout/tensor.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/tensor_view_io.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// This reduces the W dimension, transforming an NHWC tensor into NHWC with W=1. 
template < typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute > bool TestAllReduction_NHWC_reduce_w(ElementCompute reduction_identity = ElementCompute()) { using Layout = typename TensorReduction::Layout; using ElementOutput = typename TensorReduction::ElementOutput; using ElementSource = typename TensorReduction::ElementSource; int const kV = TensorReduction::kVectorLength; int const N_indices[] = {1, 2, 5, 10}; int const H_indices[] = {1, 3, 9 }; int const W_indices[] = {1, 5, 19, 40, 224}; int const C_indices[] = { kV, 2 * kV, 5 * kV, 9 * kV, 17 * kV, 39 * kV, 257 * kV, kV * 760 }; using Element = int; for (int N : N_indices) { for (int H : H_indices) { for (int W : W_indices) { for (int C : C_indices) { cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C}); cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, 1, C}); cutlass::reference::host::TensorFillRandomUniform( src_tensor.host_view(), 17, 10, -10, 0); cutlass::reference::host::BlockFillSequential( dst_tensor.host_data(), dst_tensor.capacity()); dst_tensor.sync_device(); src_tensor.sync_device(); // Execute a tensor reduction over rank 2 (the 'W' dimension is reduced; NHWC => NHC) TensorReduction reduction(src_tensor.extent(), 2); cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size()); cutlass::Status status = reduction.reduce( dst_tensor.device_ref(), src_tensor.device_ref(), device_workspace.get(), reduction_identity ); EXPECT_EQ(status, cutlass::Status::kSuccess); EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess); // Reference check dst_tensor.sync_host(); typename TensorReduction::ReductionOp reduction_op; for (int n = 0; n < src_tensor.extent().n(); ++n) { for (int h = 0; h < src_tensor.extent().h(); ++h) { for (int c = 0; c < src_tensor.extent().c(); ++c) { ElementCompute w_accum = reduction_identity; for (int w = 0; w < src_tensor.extent().w(); ++w) { w_accum = reduction_op(w_accum, ElementCompute(src_tensor.at({n, h, w, c}))); } ElementCompute got = ElementCompute(dst_tensor.at({n, h, 0, c})); bool equal = (w_accum == got); EXPECT_TRUE(equal); if (!equal) { std::cerr << "Error at location (" << n << ", " << h << ", 0, " << c << ")" << std::endl; std::cerr << " expected: " << w_accum << std::endl << " got: " << got << std::endl; std::cerr << "Problem: " << src_tensor.extent() << " -> " << dst_tensor.extent() << std::endl; std::cerr << " Grid: " << reduction.reduction_strided.grid_shape << "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl << " Final: " << reduction.reduction_strided.grid_final << "\n Block: " << reduction.reduction_strided.threadblock_final << "\n"; return false; } } } } } } } } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x8_f16x8) { int const kV = 8; using ElementOutput = float; using ElementSource = cutlass::half_t; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x2_f16x2) { int const kV = 2; using ElementOutput = float; using ElementSource = cutlass::half_t; 
using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x1_f16x1) { int const kV = 1; using ElementOutput = float; using ElementSource = cutlass::half_t; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_s32x4) { int const kV = 4; using Element = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<Element>; using TensorReduction = cutlass::reduction::device::TensorReduction< Element, Element, Layout, Functor, kV, Element >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_cf32) { int const kV = 1; using ElementOutput = cutlass::complex<float>; using ElementSource = cutlass::complex<float>; using ElementCompute = cutlass::complex<float>; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_maximum_w_cf32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::maximum<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>( -std::numeric_limits<float>::max() )); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_minimum_w_cf32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::minimum<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(std::numeric_limits<float>::max())); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_XOR_w_u32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_xor<ElementCompute>; using TensorReduction = 
cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_AND_w_s32) { int const kV = 1; using ElementOutput = unsigned; using ElementSource = unsigned; using ElementCompute = unsigned; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(0xffffffff)); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_OR_w_u32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ANY_w_s32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(0))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ALL_w_s32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(1))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ANY_w_f32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(0))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ALL_w_f32) { int const kV = 1; using ElementOutput = float; using 
ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(1))); } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/reduction/device/tensor_reduce_strided.cu/0
{ "file_path": "cutlass/test/unit/reduction/device/tensor_reduce_strided.cu", "repo_id": "cutlass", "token_count": 5747 }
64
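/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch only, not a CUTLASS API: the reduction_identity value threaded through
// TestAllReduction_NHWC_reduce_w above simply seeds the accumulator of the fold over the W
// extent, which is why the plus tests pass 0 (the default), maximum passes -FLT_MAX, minimum
// passes +FLT_MAX, bit_and passes 0xffffffff, and logical_and passes 1. The helper name below
// is hypothetical; the loop mirrors the host reference check in the test.
template <typename Op, typename Element>
Element fold_over_w_sketch(Op op, Element identity, Element const* x, int W) {
  Element acc = identity;
  for (int w = 0; w < W; ++w) {
    acc = op(acc, x[w]);
  }
  return acc;
}
/////////////////////////////////////////////////////////////////////////////////////////////////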
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "../common/cutlass_unit_test.h" #include "cutlass/util/device_rmsnorm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/constants.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" using ElementType = cutlass::half_t; using Layout = cutlass::layout::RowMajor; void rmsnorm_host(cutlass::MatrixCoord tensor_size, cutlass::TensorRef<ElementType, Layout> output, cutlass::TensorRef<ElementType, Layout> input, cutlass::TensorRef<ElementType, Layout> weight, float epsilon) { const int M = tensor_size.row(); const int N = tensor_size.column(); for (int m = 0; m < M; ++m) { float square_sum{0}; for (int n = 0; n < N; ++n) { float inp = static_cast<float>(input.at({m, n})); square_sum += inp * inp; } float sq_mean = square_sum / (float)N; float sqrt_var = cutlass::fast_sqrt(sq_mean + epsilon); for (int n = 0; n < N; ++n) { float inp = static_cast<float>(input.at({m, n})); float g = static_cast<float>(weight.at({0, n})); float res_fp32 = inp / sqrt_var * g; output.at({m, n}) = ElementType(res_fp32); } } } void run_test(int M, int N) { cutlass::HostTensor<ElementType, Layout> input, output_ref, output, weight; input.reset({M, N}); output.reset({M, N}); output_ref.reset({M, N}); weight.reset({1, N}); const unsigned seed = 2022; cutlass::reference::host::TensorFillRandomUniform(input.host_view(), seed, ElementType(5), ElementType(-5), 0); cutlass::reference::host::TensorFillRandomUniform(weight.host_view(), seed, ElementType(5), ElementType(-5), 0); input.sync_device(); weight.sync_device(); rmsnorm_host({M, N}, output_ref.host_ref(), input.host_ref(), weight.host_ref(), (float)1e-5); cutlass::rmsnorm({M, N}, 
output.device_ref(), input.device_ref(), weight.device_ref(), NULL, (float)1e-5L); output.sync_host(); float max_abs_diff = -1; float mean_abs_diff = 0; for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { auto diff = abs(static_cast<float>(output_ref.at({m, n}) - output.at({m, n}))); mean_abs_diff += diff; max_abs_diff = max(max_abs_diff, diff); } } mean_abs_diff /= float(M * N); EXPECT_TRUE(max_abs_diff < 0.001f && mean_abs_diff < 0.001f) << "Max absolute difference : " << max_abs_diff << "\n" << "Mean absolute difference: " << mean_abs_diff; } TEST(RMSNorm, 16x1024) { run_test(16, 1024); } TEST(RMSNorm, 1x127) { run_test(1, 127); }
cutlass/test/unit/util/rms_norm.cu/0
{ "file_path": "cutlass/test/unit/util/rms_norm.cu", "repo_id": "cutlass", "token_count": 1619 }
65
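/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch only, plain host code rather than the cutlass::rmsnorm device API: the
// per-row computation verified by rms_norm.cu above. Each element is divided by the root-mean-
// square of its row (with epsilon added under the square root) and scaled by the corresponding
// weight, i.e. y[n] = x[n] / sqrt(mean(x^2) + eps) * g[n]. The function name is hypothetical.
#include <cmath>
inline void rmsnorm_row_sketch(float const* x, float const* g, float* y, int N, float eps) {
  float sum_sq = 0.f;
  for (int n = 0; n < N; ++n) {
    sum_sq += x[n] * x[n];
  }
  float inv_rms = 1.f / std::sqrt(sum_sq / static_cast<float>(N) + eps);
  for (int n = 0; n < N; ++n) {
    y[n] = x[n] * inv_rms * g[n];
  }
}
/////////////////////////////////////////////////////////////////////////////////////////////////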
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines operations for all GEMM operation kinds in CUTLASS Library. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/gemm/device/gemm_sparse.h" #include "cutlass/gemm/device/gemm_complex.h" #include "cutlass/gemm/device/gemm_batched.h" #include "cutlass/gemm/device/gemm_array.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/default_gemm_universal.h" #include "cutlass/gemm/kernel/default_gemm_planar_complex_universal.h" #include "cutlass/library/library.h" #include "library_internal.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class GemmOperationBase : public Operation { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementD = ElementC; using LayoutD = LayoutC; // assuming all tensors use same type for StrideIndex using StrideIndex = typename Operator::LayoutA::Index; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; protected: /// GemmDescription description_; public: /// Constructor GemmOperationBase(char const *name = "unknown_gemm") { description_.name = name; description_.provider = Provider::kCUTLASS; description_.kind = OperationKind::kGemm; description_.gemm_kind = GemmKind::kGemm; description_.tile_description.threadblock_shape = make_Coord( Operator::ThreadblockShape::kM, Operator::ThreadblockShape::kN, Operator::ThreadblockShape::kK); description_.tile_description.threadblock_stages = Operator::kStages; description_.tile_description.warp_count = make_Coord( Operator::GemmKernel::WarpCount::kM, Operator::GemmKernel::WarpCount::kN, Operator::GemmKernel::WarpCount::kK); description_.tile_description.math_instruction.instruction_shape = make_Coord( Operator::InstructionShape::kM, Operator::InstructionShape::kN, Operator::InstructionShape::kK); description_.tile_description.math_instruction.element_accumulator = NumericTypeMap<ElementAccumulator>::kId; description_.tile_description.math_instruction.opcode_class = OpcodeClassMap<typename Operator::OperatorClass>::kId; description_.tile_description.math_instruction.math_operation = MathOperationMap<typename Operator::MathOperator>::kId; description_.tile_description.minimum_compute_capability = ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin; description_.tile_description.maximum_compute_capability = ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax; description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA); description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB); description_.C = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC); description_.D = make_TensorDescription<ElementD, LayoutD>(Operator::kAlignmentC); description_.element_epilogue = NumericTypeMap<ElementCompute>::kId; description_.split_k_mode = SplitKMode::kNone; description_.transform_A = ComplexTransformMap<Operator::kTransformA>::kId; description_.transform_B = 
ComplexTransformMap<Operator::kTransformB>::kId; } /// Returns the description of the GEMM operation virtual OperationDescription const & description() const { return description_; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class GemmOperation : public GemmOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementD = ElementC; using LayoutD = LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; public: /// Constructor GemmOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) { this->description_.gemm_kind = GemmKind::kGemm; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &operator_args, GemmConfiguration const *configuration) { operator_args.problem_size = configuration->problem_size; operator_args.ref_A = {nullptr, configuration->lda}; operator_args.ref_B = {nullptr, configuration->ldb}; operator_args.ref_C = {nullptr, configuration->ldc}; operator_args.ref_D = {nullptr, configuration->ldd}; operator_args.split_k_slices = configuration->split_k_slices; return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, GemmArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<ElementCompute const *>(arguments->alpha), *static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ typename Operator::EpilogueOutputOp::Params params( static_cast<ElementCompute const *>(arguments->alpha), static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else { return Status::kErrorInvalidProblem; } operator_args.ref_A.reset(static_cast<ElementA const *>(arguments->A)); operator_args.ref_B.reset(static_cast<ElementB const *>(arguments->B)); operator_args.ref_C.reset(static_cast<ElementC const *>(arguments->C)); operator_args.ref_D.reset(static_cast<ElementD *>(arguments->D)); return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const *configuration_ptr, void const *arguments_ptr) const { GemmConfiguration const *configuration = static_cast<GemmConfiguration const *>(configuration_ptr); GemmArguments const *arguments = static_cast<GemmArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side workspace virtual uint64_t get_device_workspace_size( void const 
*configuration_ptr, void const *arguments_ptr = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } return Operator::get_workspace_size(args); } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; return op->initialize(args, device_workspace, stream); } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<GemmArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = static_cast<Operator *>(host_workspace); status = op->update(args); if (status != Status::kSuccess) { return status; } return op->run(stream); } void print_operator_args(OperatorArguments &operator_args) const { #if 0 std::cout << "GemmOperation::OperatorArguments" << std::endl; std::cout << " problem_size: " << operator_args.problem_size.m() << ", "<< operator_args.problem_size.n() << "," << operator_args.problem_size.k() << std::endl; std::cout << " alpha: " << operator_args.epilogue.alpha << std::endl; std::cout << " alpha_ptr: " << operator_args.epilogue.alpha_ptr << std::endl; std::cout << " beta: " << operator_args.epilogue.beta << std::endl; std::cout << " beta_ptr: " << operator_args.epilogue.beta_ptr << std::endl; std::cout << " ref_A.data(): " << operator_args.ref_A.data() << std::endl; std::cout << " ref_A.stride: " << operator_args.ref_A.stride(0) << std::endl; std::cout << " ref_B.data(): " << operator_args.ref_B.data() << std::endl; std::cout << " ref_B.stride: " << operator_args.ref_B.stride(0) << std::endl; std::cout << " ref_C.data(): " << operator_args.ref_C.data() << std::endl; std::cout << " ref_C.stride: " << operator_args.ref_C.stride(0) << std::endl; #endif } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class GemmSparseOperation : public GemmOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementD = ElementC; using LayoutD = LayoutC; using ElementE = typename Operator::ElementE; using LayoutE = typename Operator::LayoutE; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; public: /// Constructor GemmSparseOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) { this->description_.kind = OperationKind::kSparseGemm; this->description_.gemm_kind = GemmKind::kSparse; this->description_.E = make_TensorDescription<ElementE, LayoutE>(Operator::kAlignmentE); } protected: /// Constructs the arguments structure given the configuration and arguments static Status 
construct_arguments_( OperatorArguments &operator_args, SparseGemmConfiguration const *configuration) { operator_args.problem_size = configuration->problem_size; operator_args.ref_A = {nullptr, configuration->lda}; operator_args.ref_B = {nullptr, configuration->ldb}; operator_args.ref_C = {nullptr, configuration->ldc}; operator_args.ref_D = {nullptr, configuration->ldd}; operator_args.ref_E = {nullptr, configuration->lde}; return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, SparseGemmArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<ElementCompute const *>(arguments->alpha), *static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ typename Operator::EpilogueOutputOp::Params params( static_cast<ElementCompute const *>(arguments->alpha), static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else { return Status::kErrorInvalidProblem; } operator_args.ref_A.reset(static_cast<ElementA const *>(arguments->A)); operator_args.ref_B.reset(static_cast<ElementB const *>(arguments->B)); operator_args.ref_C.reset(static_cast<ElementC const *>(arguments->C)); operator_args.ref_D.reset(static_cast<ElementD *>(arguments->D)); operator_args.ref_E.reset(static_cast<ElementE const *>(arguments->E)); return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const *configuration_ptr, void const *arguments_ptr) const { SparseGemmConfiguration const *configuration = static_cast<SparseGemmConfiguration const *>(configuration_ptr); SparseGemmArguments const *arguments = static_cast<SparseGemmArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side workspace virtual uint64_t get_device_workspace_size( void const *configuration_ptr, void const *arguments_ptr = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<SparseGemmConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } return Operator::get_workspace_size(args); } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<SparseGemmConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; return op->initialize(args, device_workspace, stream); } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<SparseGemmArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = 
static_cast<Operator *>(host_workspace); status = op->update(args); if (status != Status::kSuccess) { return status; } return op->run(stream); } void print_operator_args(OperatorArguments &operator_args) const { #if 0 std::cout << "GemmOperation::OperatorArguments" << std::endl; std::cout << " problem_size: " << operator_args.problem_size.m() << ", "<< operator_args.problem_size.n() << "," << operator_args.problem_size.k() << std::endl; std::cout << " alpha: " << operator_args.epilogue.alpha << std::endl; std::cout << " alpha_ptr: " << operator_args.epilogue.alpha_ptr << std::endl; std::cout << " beta: " << operator_args.epilogue.beta << std::endl; std::cout << " beta_ptr: " << operator_args.epilogue.beta_ptr << std::endl; std::cout << " ref_A.data(): " << operator_args.ref_A.data() << std::endl; std::cout << " ref_A.stride: " << operator_args.ref_A.stride(0) << std::endl; std::cout << " ref_B.data(): " << operator_args.ref_B.data() << std::endl; std::cout << " ref_B.stride: " << operator_args.ref_B.stride(0) << std::endl; std::cout << " ref_C.data(): " << operator_args.ref_C.data() << std::endl; std::cout << " ref_C.stride: " << operator_args.ref_C.stride(0) << std::endl; #endif } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class GemmUniversalOperation : public GemmOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementD = ElementC; using LayoutD = LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; public: /// Constructor GemmUniversalOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) { this->description_.gemm_kind = GemmKind::kUniversal; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &operator_args, GemmUniversalConfiguration const *configuration) { operator_args.mode = configuration->mode; operator_args.problem_size = configuration->problem_size; operator_args.batch_count = configuration->batch_count; operator_args.lda = (configuration->lda); operator_args.ldb = (configuration->ldb); operator_args.ldc = (configuration->ldc); operator_args.ldd = (configuration->ldd); return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, GemmUniversalArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<ElementCompute const *>(arguments->alpha), *static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ typename Operator::EpilogueOutputOp::Params params( static_cast<ElementCompute const *>(arguments->alpha), static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else { return Status::kErrorInvalidProblem; } // update arguments operator_args.ptr_A = arguments->A; operator_args.ptr_B = arguments->B; 
operator_args.ptr_C = arguments->C; operator_args.ptr_D = arguments->D; operator_args.batch_stride_A = arguments->batch_stride_A; operator_args.batch_stride_B = arguments->batch_stride_B; operator_args.batch_stride_C = arguments->batch_stride_C; operator_args.batch_stride_D = arguments->batch_stride_D; return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const *configuration_ptr, void const *arguments_ptr) const { GemmUniversalConfiguration const *configuration = static_cast<GemmUniversalConfiguration const *>(configuration_ptr); GemmUniversalArguments const *arguments = static_cast<GemmUniversalArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side workspace virtual uint64_t get_device_workspace_size( void const *configuration_ptr, void const *arguments_ptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmUniversalConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } status = update_arguments_( args, static_cast<GemmUniversalArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return 0; } uint64_t size = Operator::get_workspace_size(args); return size; } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmUniversalConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; status = op->initialize(args, device_workspace, stream); return status; } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<GemmUniversalArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = static_cast<Operator *>(host_workspace); status = op->update(args); if (status != Status::kSuccess) { return status; } status = op->run(stream); return status; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class GemmPlanarComplexOperation : public GemmOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementD = ElementC; using LayoutD = LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; public: /// Constructor GemmPlanarComplexOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) { 
this->description_.gemm_kind = GemmKind::kPlanarComplex; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &operator_args, GemmPlanarComplexConfiguration const *configuration) { operator_args.mode = cutlass::gemm::GemmUniversalMode::kBatched; operator_args.problem_size = configuration->problem_size; operator_args.batch_count = configuration->batch_count; operator_args.lda_real = configuration->lda_real; operator_args.lda_imag = configuration->lda_imag; operator_args.ldb_real = configuration->ldb_real; operator_args.ldb_imag = configuration->ldb_imag; operator_args.ldc_real = configuration->ldc_real; operator_args.ldc_imag = configuration->ldc_imag; operator_args.ldd_real = configuration->ldd_real; operator_args.ldd_imag = configuration->ldd_imag; return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, GemmPlanarComplexArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha), *static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta) ); operator_args.epilogue = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ typename Operator::EpilogueOutputOp::Params params( static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha), static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta) ); operator_args.epilogue = params; } else { return Status::kErrorInvalidProblem; } // update arguments operator_args.ptr_A_real = arguments->A_real; operator_args.ptr_A_imag = arguments->A_imag; operator_args.ptr_B_real = arguments->B_real; operator_args.ptr_B_imag = arguments->B_imag; operator_args.ptr_C_real = arguments->C_real; operator_args.ptr_C_imag = arguments->C_imag; operator_args.ptr_D_real = arguments->D_real; operator_args.ptr_D_imag = arguments->D_imag; operator_args.batch_stride_A = arguments->batch_stride_A_real; operator_args.batch_stride_A_imag = arguments->batch_stride_A_imag; operator_args.batch_stride_B = arguments->batch_stride_B_real; operator_args.batch_stride_B_imag = arguments->batch_stride_B_imag; operator_args.batch_stride_C = arguments->batch_stride_C_real; operator_args.batch_stride_C_imag = arguments->batch_stride_C_imag; operator_args.batch_stride_D = arguments->batch_stride_D_real; operator_args.batch_stride_D_imag = arguments->batch_stride_D_imag; return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const *configuration_ptr, void const *arguments_ptr) const { GemmPlanarComplexConfiguration const *configuration = static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr); GemmPlanarComplexArguments const *arguments = static_cast<GemmPlanarComplexArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side workspace virtual uint64_t get_device_workspace_size( void const *configuration_ptr, 
void const *arguments_ptr = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } uint64_t size = Operator::get_workspace_size(args); return size; } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; status = op->initialize(args, device_workspace, stream); return status; } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<GemmPlanarComplexArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = static_cast<Operator *>(host_workspace); status = op->update(args); if (status != Status::kSuccess) { return status; } status = op->run(stream); return status; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class GemmPlanarComplexArrayOperation : public GemmOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementD = ElementC; using LayoutD = LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; public: /// Constructor GemmPlanarComplexArrayOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) { this->description_.gemm_kind = GemmKind::kPlanarComplexArray; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &operator_args, GemmPlanarComplexArrayConfiguration const *configuration) { operator_args.mode = cutlass::gemm::GemmUniversalMode::kArray; operator_args.problem_size = configuration->problem_size; operator_args.batch_count = configuration->batch_count; operator_args.lda_real = configuration->lda_real; operator_args.lda_imag = configuration->lda_imag; operator_args.ldb_real = configuration->ldb_real; operator_args.ldb_imag = configuration->ldb_imag; operator_args.ldc_real = configuration->ldc_real; operator_args.ldc_imag = configuration->ldc_imag; operator_args.ldd_real = configuration->ldd_real; operator_args.ldd_imag = configuration->ldd_imag; return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, GemmPlanarComplexArrayArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha), *static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta) ); 
operator_args.epilogue = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ typename Operator::EpilogueOutputOp::Params params( static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha), static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta) ); operator_args.epilogue = params; } else { return Status::kErrorInvalidProblem; } // update arguments operator_args.ptr_A_real = arguments->A_real; operator_args.ptr_A_imag = arguments->A_imag; operator_args.ptr_B_real = arguments->B_real; operator_args.ptr_B_imag = arguments->B_imag; operator_args.ptr_C_real = arguments->C_real; operator_args.ptr_C_imag = arguments->C_imag; operator_args.ptr_D_real = arguments->D_real; operator_args.ptr_D_imag = arguments->D_imag; operator_args.ptr_M = arguments->M; operator_args.ptr_N = arguments->N; operator_args.ptr_K = arguments->K; return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const *configuration_ptr, void const *arguments_ptr) const { GemmPlanarComplexArrayConfiguration const *configuration = static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr); GemmPlanarComplexArrayArguments const *arguments = static_cast<GemmPlanarComplexArrayArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side workspace virtual uint64_t get_device_workspace_size( void const *configuration_ptr, void const *arguments_ptr = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } uint64_t size = Operator::get_workspace_size(args); return size; } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; status = op->initialize(args, device_workspace, stream); return status; } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<GemmPlanarComplexArrayArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = static_cast<Operator *>(host_workspace); status = op->update(args); if (status != Status::kSuccess) { return status; } status = op->run(stream); return status; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class GemmGroupedOperation : public GemmOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename 
Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementD = ElementC; using LayoutD = LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; public: /// Constructor GemmGroupedOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) { this->description_.gemm_kind = GemmKind::kGrouped; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &op_args, GemmGroupedConfiguration const *config) { op_args.problem_count = config->problem_count; op_args.threadblock_count = config->threadblock_count; return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &op_args, GemmGroupedArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<ElementCompute const *>(arguments->alpha), *static_cast<ElementCompute const *>(arguments->beta) ); op_args.output_op = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice) { typename Operator::EpilogueOutputOp::Params params( static_cast<ElementCompute const *>(arguments->alpha), static_cast<ElementCompute const *>(arguments->beta) ); op_args.output_op = params; } else { return Status::kErrorInvalidProblem; } op_args.problem_sizes = arguments->problem_sizes; op_args.ptr_A = static_cast<ElementA **>(arguments->ptr_A); op_args.ptr_B = static_cast<ElementB **>(arguments->ptr_B); op_args.ptr_C = static_cast<ElementC **>(arguments->ptr_C); op_args.ptr_D = static_cast<ElementD **>(arguments->ptr_D); op_args.lda = arguments->lda; op_args.ldb = arguments->ldb; op_args.ldc = arguments->ldc; op_args.ldd = arguments->ldd; return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const *configuration_ptr, void const *arguments_ptr) const { GemmGroupedConfiguration const *configuration = static_cast<GemmGroupedConfiguration const *>(configuration_ptr); GemmGroupedArguments const *arguments = static_cast<GemmGroupedArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side workspace virtual uint64_t get_device_workspace_size( void const *configuration_ptr, void const *arguments_ptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<GemmGroupedConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } status = update_arguments_( args, static_cast<GemmGroupedArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return 0; } uint64_t size = Operator::get_workspace_size(args); return size; } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = 
construct_arguments_( args, static_cast<GemmGroupedConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; status = op->initialize(args, device_workspace, stream); return status; } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<GemmGroupedArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = static_cast<Operator *>(host_workspace); status = op->update(args); if (status != Status::kSuccess) { return status; } status = op->run(stream); return status; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/library/src/gemm_operation.h/0
{ "file_path": "cutlass/tools/library/src/gemm_operation.h", "repo_id": "cutlass", "token_count": 14489 }
66
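All of the GEMM operation classes above implement the same Operation interface: can_implement(), the two workspace-size queries, initialize(), and run(). The sketch below walks a GemmUniversalOperation through that sequence. It is illustrative only: the population of GemmUniversalConfiguration and GemmUniversalArguments is left to the caller, and the workspace handling reflects an assumption about typical usage rather than code taken from the library.

#include <cstdint>
#include <vector>

#include <cuda_runtime.h>

#include "cutlass/library/library.h"

// Hypothetical driver: walks a cutlass::library::Operation through the
// can_implement -> workspace query -> initialize -> run sequence shown above.
cutlass::Status run_gemm_operation(
  cutlass::library::Operation const *op,                       // e.g. a GemmUniversalOperation<...>
  cutlass::library::GemmUniversalConfiguration const &config,  // populated by the caller (not shown)
  cutlass::library::GemmUniversalArguments const &args,        // populated by the caller (not shown)
  cudaStream_t stream = nullptr) {

  // Reject unsupported problems before allocating anything.
  cutlass::Status status = op->can_implement(&config, &args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // The host workspace holds the placement-constructed Operator object;
  // the device workspace holds whatever scratch the kernel reports it needs.
  std::vector<std::uint8_t> host_workspace(op->get_host_workspace_size(&config));

  void *device_workspace = nullptr;
  uint64_t device_bytes = op->get_device_workspace_size(&config, &args);
  if (device_bytes) {
    if (cudaMalloc(&device_workspace, device_bytes) != cudaSuccess) {
      return cutlass::Status::kErrorInternal;
    }
  }

  // initialize() placement-news the Operator into the host workspace;
  // run() updates it with the runtime arguments and launches the kernel.
  status = op->initialize(&config, host_workspace.data(), device_workspace, stream);
  if (status == cutlass::Status::kSuccess) {
    status = op->run(&args, host_workspace.data(), device_workspace, stream);
  }

  if (device_workspace) {
    cudaFree(device_workspace);
  }
  return status;
}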
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines operations for all TRMM operation kinds in CUTLASS Library. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/device/trmm.h" #include "cutlass/gemm/kernel/default_trmm_universal.h" #include "cutlass/gemm/kernel/trmm_universal.h" #include "cutlass/library/library.h" #include "library_internal.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class TrmmOperationBase : public Operation { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; static SideMode const kSideMode = Operator::kSideMode; static FillMode const kFillMode = Operator::kFillMode; static DiagType const kDiagType = Operator::kDiagType; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; protected: /// TrmmDescription description_; public: /// Constructor TrmmOperationBase(char const *name = "unknown_trmm") { description_.name = name; description_.provider = Provider::kCUTLASS; description_.kind = OperationKind::kTrmm; description_.trmm_kind = TrmmKind::kUniversal; description_.side_mode = kSideMode; description_.fill_mode = kFillMode; description_.diag_type = kDiagType; description_.tile_description.threadblock_shape = make_Coord( Operator::ThreadblockShape::kM, Operator::ThreadblockShape::kN, Operator::ThreadblockShape::kK); description_.tile_description.threadblock_stages = Operator::kStages; description_.tile_description.warp_count = make_Coord( Operator::TrmmKernel::WarpCount::kM, Operator::TrmmKernel::WarpCount::kN, Operator::TrmmKernel::WarpCount::kK); description_.tile_description.math_instruction.instruction_shape = make_Coord( Operator::InstructionShape::kM, Operator::InstructionShape::kN, Operator::InstructionShape::kK); description_.tile_description.math_instruction.element_accumulator = NumericTypeMap<ElementAccumulator>::kId; description_.tile_description.math_instruction.opcode_class = OpcodeClassMap<typename Operator::OperatorClass>::kId; description_.tile_description.math_instruction.math_operation = MathOperationMap<typename Operator::Operator>::kId; description_.tile_description.minimum_compute_capability = ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin; description_.tile_description.maximum_compute_capability = ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax; description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA); description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB); description_.D = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC); description_.element_epilogue = NumericTypeMap<ElementCompute>::kId; description_.split_k_mode = SplitKMode::kNone; description_.transform_A = ComplexTransformMap<Operator::kTransformA>::kId; } /// Returns the description of the TRMM operation virtual OperationDescription const & description() const { return description_; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class TrmmOperation : public 
TrmmOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; static SideMode const kSideMode = Operator::kSideMode; static FillMode const kFillMode = Operator::kFillMode; static DiagType const kDiagType = Operator::kDiagType; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; using OperatorArguments = typename Operator::Arguments; public: /// Constructor TrmmOperation(char const *name = "unknown_trmm"): TrmmOperationBase<Operator_>(name) { this->description_.trmm_kind = TrmmKind::kUniversal; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &operator_args, TrmmConfiguration const *configuration) { //operator_args.mode = configuration->mode; operator_args.problem_size = configuration->problem_size; operator_args.batch_count = configuration->batch_count; operator_args.lda = int(configuration->lda); operator_args.ldb = int(configuration->ldb); operator_args.ldd = int(configuration->ldd); return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, TrmmArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<ElementCompute const *>(arguments->alpha), *static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ typename Operator::EpilogueOutputOp::Params params( static_cast<ElementCompute const *>(arguments->alpha), static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else { return Status::kErrorInvalidProblem; } // update arguments operator_args.ptr_A = arguments->A; operator_args.ptr_B = arguments->B; operator_args.batch_stride_A = arguments->batch_stride_A; operator_args.batch_stride_B = arguments->batch_stride_B; operator_args.ptr_D = arguments->D; operator_args.batch_stride_D = arguments->batch_stride_D; return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const *configuration_ptr, void const *arguments_ptr) const { TrmmConfiguration const *configuration = static_cast<TrmmConfiguration const *>(configuration_ptr); TrmmArguments const *arguments = static_cast<TrmmArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side workspace virtual uint64_t get_device_workspace_size( void const *configuration_ptr, void const *arguments_ptr = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<TrmmConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } uint64_t size = 
Operator::get_workspace_size(args); return size; } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<TrmmConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; status = op->initialize(args, device_workspace, stream); return status; } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<TrmmArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = static_cast<Operator *>(host_workspace); bool need_swapped_matrices = (kSideMode == SideMode::kLeft && std::is_same<typename Operator::LayoutC, layout::ColumnMajor>::value) || (kSideMode == SideMode::kRight && std::is_same<typename Operator::LayoutC, layout::RowMajor>::value); if (need_swapped_matrices) { status = op->update(args.swapped_matrices(), device_workspace); } else { status = op->update(args, device_workspace); } if (status != Status::kSuccess) { return status; } status = op->run(stream); return status; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/library/src/trmm_operation.h/0
{ "file_path": "cutlass/tools/library/src/trmm_operation.h", "repo_id": "cutlass", "token_count": 3752 }
67
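As an aside on TrmmOperation::run above: the swapped-matrices decision depends only on the side mode and the output layout. The helper below merely restates that predicate as a standalone constexpr function for illustration; it is not part of the library.

#include <type_traits>

#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"

// Standalone restatement of the predicate evaluated in TrmmOperation::run:
// the (kLeft, ColumnMajor) and (kRight, RowMajor) output combinations are the
// ones that take the args.swapped_matrices() path.
template <typename LayoutC>
constexpr bool needs_swapped_matrices(cutlass::SideMode side) {
  return (side == cutlass::SideMode::kLeft &&
          std::is_same<LayoutC, cutlass::layout::ColumnMajor>::value) ||
         (side == cutlass::SideMode::kRight &&
          std::is_same<LayoutC, cutlass::layout::RowMajor>::value);
}

// For example, a left-side TRMM writing a column-major output takes the swapped path.
static_assert(needs_swapped_matrices<cutlass::layout::ColumnMajor>(cutlass::SideMode::kLeft),
              "left-side, column-major output is handled via swapped matrices");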
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Class performing output during profiling */ #pragma once #include <vector> #include <fstream> // CUTLASS Profiler includes #include "options.h" #include "enumerated_types.h" #include "performance_result.h" // CUTLASS Library includes #include "cutlass/library/library.h" namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// class PerformanceReport { private: /// Reference to options Options const &options_; /// Operation kind library::OperationKind op_kind_; /// Operation file name containing performance report of op_kind std::string op_file_name_; /// Output file containing results std::ofstream output_file_; /// Operation file name containing junit performance report of op_kind std::string op_junit_file_name_; /// Output file containing junit results std::ofstream junit_output_file_; /// Flag indicating the performance report is valid bool good_; /// Vector of argument names std::vector<std::string> argument_names_; /// Counter uniquely identifying problem within the report size_t problem_index_; /// Collection of all results PerformanceResultVector concatenated_results_; public: PerformanceReport(Options const &options, std::vector<std::string> const &argument_names, library::OperationKind const &op_kind); ~PerformanceReport(); bool good() const { return good_; } void next_problem(); void append_result(PerformanceResult result); void sort_results(PerformanceResultVector &results); void append_results(PerformanceResultVector const &results); public: /// Prints the CSV header std::ostream & print_csv_header_(std::ostream &out); /// Prints the CSV std::ostream & print_result_csv_(std::ostream &out, PerformanceResult const 
&result); /// @defgroup jUnit Result Generation /// Functions related to generation of the jUnit results /// @{ std::ostream & print_junit_header_(std::ostream &out); std::ostream & print_junit_result_(std::ostream &out, PerformanceResult const &result); std::ostream & print_junit_footer_(std::ostream &out); /// @} /// Prints the result in human readable form std::ostream & print_result_pretty_( std::ostream &out, PerformanceResult const &result, bool use_shell_coloring = true); }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass
cutlass/tools/profiler/include/cutlass/profiler/performance_report.h/0
{ "file_path": "cutlass/tools/profiler/include/cutlass/profiler/performance_report.h", "repo_id": "cutlass", "token_count": 1135 }
68
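A hedged usage sketch for PerformanceReport follows. Options and PerformanceResult carry profiler-specific fields that are not visible in this header, so their construction is left to the caller, and the interleaving of append_result() and next_problem() reflects an assumption about typical use rather than documented behavior.

#include <string>
#include <vector>

#include "cutlass/library/library.h"
#include "cutlass/profiler/options.h"
#include "cutlass/profiler/performance_report.h"
#include "cutlass/profiler/performance_result.h"

// Hypothetical reporting loop: one PerformanceResult per profiled problem.
void emit_report(
  cutlass::profiler::Options const &options,                          // parsed elsewhere
  std::vector<cutlass::profiler::PerformanceResult> const &results) { // gathered elsewhere

  // Argument columns are hypothetical; real callers pass the operation's argument names.
  std::vector<std::string> argument_names = {"m", "n", "k", "beta"};

  cutlass::profiler::PerformanceReport report(
    options, argument_names, cutlass::library::OperationKind::kGemm);

  if (!report.good()) {
    return;  // output files could not be opened
  }

  for (auto const &result : results) {
    report.append_result(result);  // recorded and, depending on Options, emitted as CSV/JUnit
    report.next_problem();         // advance the per-problem counter
  }
  // The destructor is assumed to write any footers and close the output files.
}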
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Provides several functions for filling tensors with data. */ #include "cutlass/profiler/enumerated_types.h" namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const *text; char const *pretty; ExecutionMode enumerant; } ExecutionMode_enumerants[] = { {"profile", "Profile", ExecutionMode::kProfile}, {"dry_run", "Dry run", ExecutionMode::kDryRun}, {"dry", "dry run", ExecutionMode::kDryRun}, {"trace", "Trace", ExecutionMode::kTrace}, {"enumerate", "Enumerate", ExecutionMode::kEnumerate} }; /// Converts a ExecutionMode enumerant to a string char const *to_string(ExecutionMode mode, bool pretty) { for (auto const & possible : ExecutionMode_enumerants) { if (mode == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /// Parses a ExecutionMode enumerant from a string template <> ExecutionMode from_string<ExecutionMode>(std::string const &str) { for (auto const & possible : ExecutionMode_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return ExecutionMode::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const *text; char const *pretty; AlgorithmMode enumerant; } AlgorithmMode_enumerants[] = { {"matching", "Matching", AlgorithmMode::kMatching}, {"best", "Best", AlgorithmMode::kBest}, {"default", "Default", AlgorithmMode::kDefault} }; /// Converts a ExecutionMode enumerant to a string char const *to_string(AlgorithmMode mode, bool pretty) { for (auto const & possible : AlgorithmMode_enumerants) { if (mode == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Parses a ExecutionMode enumerant from a string template <> AlgorithmMode from_string<AlgorithmMode>(std::string const &str) { for (auto const & possible : AlgorithmMode_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return AlgorithmMode::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const *text; char const *pretty; Disposition enumerant; } Disposition_enumerants[] = { {"passed", "Passed", Disposition::kPassed}, {"failed", "Failed", Disposition::kFailed}, {"not_run", "Not run", Disposition::kNotRun}, {"not_verified", "Not verified", Disposition::kNotVerified}, {"invalid_problem", "Invalid problem", Disposition::kInvalidProblem}, {"not_supported", "Not supported", Disposition::kNotSupported}, {"incorrect", "Incorrect", Disposition::kIncorrect} }; /// Converts a Disposition enumerant to a string char const *to_string(Disposition disposition, bool pretty) { for (auto const & possible : Disposition_enumerants) { if (disposition == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Parses a Disposition enumerant from a string template <> Disposition from_string<Disposition>(std::string const &str) { for (auto const & possible : Disposition_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return Disposition::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const *text; char const *pretty; SaveWorkspace enumerant; } SaveWorkspace_enumerants[] = { {"never", "Never", SaveWorkspace::kNever}, {"incorrect", "Incorrect", SaveWorkspace::kIncorrect}, {"always", "Always", SaveWorkspace::kAlways} }; /// Converts a SaveWorkspace enumerant to a string char const *to_string(SaveWorkspace save_option, bool pretty) { for (auto const & possible : SaveWorkspace_enumerants) { if (save_option == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /// Parses a SaveWorkspace enumerant from a string template <> SaveWorkspace from_string<SaveWorkspace>(std::string const &str) { for (auto const & possible : SaveWorkspace_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return SaveWorkspace::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const *text; char const *pretty; ArgumentTypeID enumerant; } ArgumentTypeID_enumerants[] = { {"scalar", "Scalar", ArgumentTypeID::kScalar}, {"int", "Integer", ArgumentTypeID::kInteger}, {"tensor", "Tensor", ArgumentTypeID::kTensor}, {"batched_tensor", "BatchedTensor", ArgumentTypeID::kBatchedTensor}, {"struct", "Struct", ArgumentTypeID::kStructure}, {"enum", "Enumerated type", ArgumentTypeID::kEnumerated} }; /// Converts a ArgumentTypeID enumerant to a string char const *to_string(ArgumentTypeID type, bool pretty) { for (auto const & possible : ArgumentTypeID_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Parses a ArgumentTypeID enumerant from a string template <> ArgumentTypeID from_string<ArgumentTypeID>(std::string const &str) { for (auto const & possible : ArgumentTypeID_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return ArgumentTypeID::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/profiler/src/enumerated_types.cpp/0
{ "file_path": "cutlass/tools/profiler/src/enumerated_types.cpp", "repo_id": "cutlass", "token_count": 2582 }
69
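The translation tables above make to_string() and from_string<>() near-inverses of each other. A small sketch of the round-trip behavior, assuming both helpers are declared in cutlass/profiler/enumerated_types.h as included at the top of this file:

#include <cassert>
#include <string>

#include "cutlass/profiler/enumerated_types.h"

void enumerant_round_trip() {
  using cutlass::profiler::ExecutionMode;
  using cutlass::profiler::from_string;
  using cutlass::profiler::to_string;

  // Both "dry_run" and "dry" alias kDryRun, so parsing is many-to-one ...
  ExecutionMode mode = from_string<ExecutionMode>("dry");
  assert(mode == ExecutionMode::kDryRun);

  // ... and to_string() returns the first table entry, i.e. the canonical spelling.
  assert(std::string(to_string(mode, /*pretty=*/false)) == "dry_run");
  assert(std::string(to_string(mode, /*pretty=*/true)) == "Dry run");

  // Unrecognized text maps to the kInvalid enumerant instead of throwing.
  assert(from_string<ExecutionMode>("bogus") == ExecutionMode::kInvalid);
}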
/****************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #pragma once /** * \file * Utility for parsing command line arguments */ #include <iostream> #include <limits> #include <sstream> #include <string> #include <vector> #include <cuda_runtime.h> #include "cutlass/cutlass.h" namespace cutlass { /****************************************************************************** * command_line ******************************************************************************/ /** * Utility for parsing command line arguments */ struct CommandLine { std::vector<std::string> keys; std::vector<std::string> values; std::vector<std::string> args; /** * Constructor */ CommandLine(int argc, const char** argv) { using namespace std; for (int i = 1; i < argc; i++) { string arg = argv[i]; if ((arg[0] != '-') || (arg[1] != '-')) { args.push_back(arg); continue; } string::size_type pos; string key, val; if ((pos = arg.find('=')) == string::npos) { key = string(arg, 2, arg.length() - 2); val = ""; } else { key = string(arg, 2, pos - 2); val = string(arg, pos + 1, arg.length() - 1); } keys.push_back(key); values.push_back(val); } } /** * Checks whether a flag "--<flag>" is present in the commandline */ bool check_cmd_line_flag(const char* arg_name) const { using namespace std; for (int i = 0; i < int(keys.size()); ++i) { if (keys[i] == string(arg_name)) return true; } return false; } /** * Returns number of naked (non-flag and non-key-value) commandline parameters */ size_t num_naked_args() const { return args.size(); } /** * Print naked (non-flag and non-key-value) commandline parameters */ void print_naked_args(std::ostream &out) const { for (auto arg : args) { out << " " << arg <<"\n"; } } /** * Returns the commandline parameter for a given index (not including flags) */ template <typename value_t> void get_cmd_line_argument(size_t index, 
value_t& val) const { using namespace std; if (index < args.size()) { istringstream str_stream(args[index]); str_stream >> val; } } /** * Obtains the boolean value specified for a given commandline parameter --<flag>=<bool> */ void get_cmd_line_argument(const char* arg_name, bool& val, bool _default) const { val = _default; if (check_cmd_line_flag(arg_name)) { std::string value; get_cmd_line_argument(arg_name, value); val = !(value == "0" || value == "false"); } } /** * Obtains the value specified for a given commandline parameter --<flag>=<value> */ template <typename value_t> void get_cmd_line_argument(const char* arg_name, value_t& val) const { get_cmd_line_argument(arg_name, val, val); } /** * Obtains the value specified for a given commandline parameter --<flag>=<value> */ template <typename value_t> void get_cmd_line_argument(const char* arg_name, value_t& val, value_t const& _default) const { using namespace std; val = _default; for (int i = 0; i < int(keys.size()); ++i) { if (keys[i] == string(arg_name)) { istringstream str_stream(values[i]); str_stream >> val; } } } /** * Returns the values specified for a given commandline parameter --<flag>=<value>,<value>* */ template <typename value_t> void get_cmd_line_arguments(const char* arg_name, std::vector<value_t>& vals, char sep = ',') const { using namespace std; if (check_cmd_line_flag(arg_name)) { // Clear any default values vals.clear(); // Recover from multi-value string for (size_t i = 0; i < keys.size(); ++i) { if (keys[i] == string(arg_name)) { string val_string(values[i]); separate_string(val_string, vals, sep); } } } } /** * Returns the values specified for a given commandline parameter * --<flag>=<value>,<value_start:value_end>* */ void get_cmd_line_argument_pairs(const char* arg_name, std::vector<std::pair<std::string, std::string> >& tokens, char delim = ',', char sep = ':') const { if (check_cmd_line_flag(arg_name)) { std::string value; get_cmd_line_argument(arg_name, value); tokenize(tokens, value, delim, sep); } } /** * Returns a list of ranges specified for a given commandline parameter * --<flag>=<key:value>,<key:value>* */ void get_cmd_line_argument_ranges(const char* arg_name, std::vector<std::vector<std::string> >& vals, char delim = ',', char sep = ':') const { std::vector<std::string> ranges; get_cmd_line_arguments(arg_name, ranges, delim); for (std::vector<std::string>::const_iterator range = ranges.begin(); range != ranges.end(); ++range) { std::vector<std::string> range_vals; separate_string(*range, range_vals, sep); vals.push_back(range_vals); } } /** * The number of pairs parsed */ int parsed_argc() const { return (int)keys.size(); } //------------------------------------------------------------------------- // Utility functions //------------------------------------------------------------------------- /// Tokenizes a comma-delimited list of string pairs delimited by ':' static void tokenize(std::vector<std::pair<std::string, std::string> >& tokens, std::string const& str, char delim = ',', char sep = ':') { // Home-built to avoid Boost dependency size_t s_idx = 0; size_t d_idx = std::string::npos; while (s_idx < str.size()) { d_idx = str.find_first_of(delim, s_idx); size_t end_idx = (d_idx != std::string::npos ? 
d_idx : str.size()); size_t sep_idx = str.find_first_of(sep, s_idx); size_t offset = 1; if (sep_idx == std::string::npos || sep_idx >= end_idx) { sep_idx = end_idx; offset = 0; } std::pair<std::string, std::string> item( str.substr(s_idx, sep_idx - s_idx), str.substr(sep_idx + offset, end_idx - sep_idx - offset)); tokens.push_back(item); s_idx = end_idx + 1; } } /// Tokenizes a comma-delimited list of string pairs delimited by ':' static void tokenize(std::vector<std::string>& tokens, std::string const& str, char delim = ',', char sep = ':') { typedef std::vector<std::pair<std::string, std::string> > TokenVector; typedef TokenVector::const_iterator token_iterator; std::vector<std::pair<std::string, std::string> > token_pairs; tokenize(token_pairs, str, delim, sep); for (token_iterator tok = token_pairs.begin(); tok != token_pairs.end(); ++tok) { tokens.push_back(tok->first); } } template <typename value_t> static void separate_string(std::string const& str, std::vector<value_t>& vals, char sep = ',') { std::istringstream str_stream(str); std::string::size_type old_pos = 0; std::string::size_type new_pos = 0; // Iterate <sep>-delimited values value_t val; while ((new_pos = str.find(sep, old_pos)) != std::string::npos) { if (new_pos != old_pos) { str_stream.width(new_pos - old_pos); str_stream >> val; vals.push_back(val); } // skip over delimiter str_stream.ignore(1); old_pos = new_pos + 1; } // Read last value str_stream >> val; vals.push_back(val); } }; } // namespace cutlass
cutlass/tools/util/include/cutlass/util/command_line.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/command_line.h", "repo_id": "cutlass", "token_count": 3938 }
70
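Since the entire CommandLine utility is visible above, a self-contained usage sketch follows; the flag names and defaults are hypothetical.

#include <iostream>
#include <string>
#include <vector>

#include "cutlass/util/command_line.h"

int main(int argc, const char **argv) {
  // e.g.  ./tool --m=1024 --alpha=1.5 --modes=gemm,conv --verbose input.bin
  cutlass::CommandLine cmd(argc, argv);

  if (cmd.check_cmd_line_flag("help")) {
    std::cout << "usage: tool [--m=<int>] [--alpha=<float>] [--modes=<list>] [--verbose] <file>\n";
    return 0;
  }

  int m = 128;                          // retained as the default when --m is absent
  cmd.get_cmd_line_argument("m", m);

  float alpha;
  cmd.get_cmd_line_argument("alpha", alpha, 1.0f);

  bool verbose;
  cmd.get_cmd_line_argument("verbose", verbose, false);  // any value except "0"/"false" (or none) reads as true

  std::vector<std::string> modes;
  cmd.get_cmd_line_arguments("modes", modes);            // comma-separated values

  // Naked arguments are those that are neither --flag nor --key=value, e.g. "input.bin" above.
  std::cout << cmd.num_naked_args() << " naked argument(s):\n";
  cmd.print_naked_args(std::cout);

  std::cout << "m=" << m << " alpha=" << alpha
            << " verbose=" << verbose << " modes=" << modes.size() << "\n";
  return 0;
}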
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cuda.h> #include <cute/util/debug.hpp> namespace cute { void device_init(int device_id, bool quiet = false) { cudaDeviceProp device_prop; std::size_t device_free_physmem; std::size_t device_total_physmem; CUTE_CHECK_ERROR(cudaSetDevice(device_id)); CUTE_CHECK_ERROR(cudaMemGetInfo(&device_free_physmem, &device_total_physmem)); CUTE_CHECK_ERROR(cudaGetDeviceProperties(&device_prop, device_id)); if (device_prop.major < 1) { fprintf(stderr, "Device does not support CUDA.\n"); exit(1); } //float device_giga_bandwidth = float(device_prop.memoryBusWidth) * device_prop.memoryClockRate * 2 / 8 / 1000 / 1000; if (!quiet) { printf("Using device %d: %s (SM%d, %d SMs)\n", device_id, device_prop.name, device_prop.major * 10 + device_prop.minor, device_prop.multiProcessorCount); fflush(stdout); } } /** * Convert the SM version (e.g. v7.0, v7.5) to the physical number of cores. */ inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the # of cores per SM typedef struct { int SM; // 0xMm (hexadecimal notation), M = SM Major version, // and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { {0x30, 192}, {0x32, 192}, {0x35, 192}, {0x37, 192}, {0x50, 128}, {0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {0x62, 128}, {0x70, 64}, {0x72, 64}, {0x75, 64}, {-1, -1}}; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one // to run properly printf("MapSMtoCores for SM %d.%d is undefined." 
" Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } } // end namespace cute
cutlass/tools/util/include/cutlass/util/helper_cuda.hpp/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/helper_cuda.hpp", "repo_id": "cutlass", "token_count": 1388 }
71
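A short usage sketch for the two helpers above. Note that the SM-to-core table ends at SM 7.5, so on newer architectures _ConvertSMVer2Cores falls back to the last entry and prints a warning; the result is an estimate.

#include <cstdio>

#include <cuda_runtime.h>

#include "cutlass/util/helper_cuda.hpp"

int main() {
  int device_id = 0;
  cute::device_init(device_id);  // sets the device and prints its name / SM version / SM count

  cudaDeviceProp props;
  cudaGetDeviceProperties(&props, device_id);

  // The table above stops at SM 7.5; newer architectures reuse the last entry,
  // so treat the total below as a rough estimate.
  int cores_per_sm = cute::_ConvertSMVer2Cores(props.major, props.minor);
  std::printf("~%d CUDA cores in total\n", cores_per_sm * props.multiProcessorCount);
  return 0;
}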
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <curand_kernel.h> #include "cutlass/cutlass.h" namespace cutlass { namespace reference { namespace device { namespace kernel { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Kernel to initialize tensor to uniform random distribution template <typename T> __global__ void TensorInitializeUniform( Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) { __shared__ curandState_t rng_state[1024]; uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; curand_init(seed, gtid, 0, &rng_state[threadIdx.x]); int c_idx = blockIdx.x * blockDim.x + threadIdx.x; int s_idx = blockIdx.y * blockDim.x; tensor += s_idx * ldm + c_idx; for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) { if (s_idx < dim_strided && c_idx < dim_contiguous) { double range = dist.uniform.max - dist.uniform.min; double rnd = curand_uniform(&rng_state[threadIdx.x]); rnd = dist.uniform.min + range * rnd; // Random values are cast to integer after scaling by a power of two to facilitate error // testing if (dist.int_scale >= 0) { rnd = double(int(rnd * double(1 << dist.int_scale))); *tensor = T(rnd / double(1 << dist.int_scale)); } else { *tensor = T(rnd); } tensor += ldm; } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Kernel to initialize tensor to uniform distribution template <typename T> __global__ void TensorInitializeGaussian( Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) { __shared__ curandState_t rng_state[1024]; uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * 
blockDim.x; curand_init(seed, gtid, 0, &rng_state[threadIdx.x]); int c_idx = blockIdx.x * blockDim.x + threadIdx.x; int s_idx = blockIdx.y * blockDim.x; tensor += s_idx * ldm + c_idx; for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) { if (s_idx < dim_strided && c_idx < dim_contiguous) { // Random values are cast to integer after scaling by a power of two to facilitate error // testing double rnd = curand_normal(&rng_state[threadIdx.x]); rnd = dist.gaussian.mean + dist.gaussian.stddev * rnd; if (dist.int_scale >= 0) { rnd = double(int(rnd * double(1 << dist.int_scale))); *tensor = T(rnd / double(1 << dist.int_scale)); } else { *tensor = T(rnd); } } } } /// Kernel to initialize tensor to an identity matrix template <typename T> __global__ void TensorInitializeLinear( Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) { __shared__ curandState_t rng_state[1024]; uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; curand_init(seed, gtid, 0, &rng_state[threadIdx.x]); int c_idx = blockIdx.x * blockDim.x + threadIdx.x; int s_idx = blockIdx.y * blockDim.x; tensor += s_idx * ldm + c_idx; for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) { if (s_idx < dim_strided && c_idx < dim_contiguous) { *tensor = dist.linear.offset + dist.linear.delta_row * c_idx + dist.linear.delta_column * s_idx; } } } /// Kernel to initialize tensor to an identity matrix template <typename T> __global__ void TensorInitializeIdentity( Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) { __shared__ curandState_t rng_state[1024]; uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; curand_init(seed, gtid, 0, &rng_state[threadIdx.x]); int c_idx = blockIdx.x * blockDim.x + threadIdx.x; int s_idx = blockIdx.y * blockDim.x; tensor += s_idx * ldm + c_idx; for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) { if (s_idx < dim_strided && c_idx < dim_contiguous) { *tensor = (c_idx == s_idx ? T(1) : T(0)); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace device } // namespace reference } // namespace cutlass
cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h", "repo_id": "cutlass", "token_count": 2183 }
72
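The initialization kernels above are normally driven by higher-level CUTLASS utility helpers, but a direct launch is straightforward. The sketch below is illustrative only: the block/grid mapping mirrors the indexing the kernel itself uses (one thread per contiguous index, each block sweeping `blockDim.x` strided indices in its `s_offset` loop), the include paths are assumed from the file path recorded above, and `fill_uniform` is a hypothetical helper name, not part of the library.

```cpp
// Minimal host-side sketch (not part of CUTLASS) for launching TensorInitializeUniform
// on a tensor of extent dim_contiguous x dim_strided with leading dimension ldm.
#include <cstdint>
#include <cuda_runtime.h>
#include "cutlass/util/distribution.h"                                // cutlass::Distribution (assumed path)
#include "cutlass/util/reference/device/kernel/tensor_elementwise.h"  // kernels shown above

void fill_uniform(float *device_tensor, int dim_contiguous, int dim_strided,
                  int ldm, int64_t seed) {
  cutlass::Distribution dist;
  dist.uniform.min = -4.0;   // these are the fields the kernel reads
  dist.uniform.max =  4.0;
  dist.int_scale   = -1;     // negative keeps continuous values; >= 0 quantizes to steps of 1/2^int_scale

  // One thread per contiguous index; each block covers blockDim.x strided indices.
  // blockDim.x must not exceed the 1024-entry shared rng_state array in the kernel.
  dim3 block(256, 1, 1);
  dim3 grid((dim_contiguous + block.x - 1) / block.x,
            (dim_strided    + block.x - 1) / block.x, 1);

  cutlass::reference::device::kernel::TensorInitializeUniform<float>
      <<<grid, block>>>(dist, seed, dim_contiguous, dim_strided, device_tensor, ldm);
}
```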
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for Rank 2k update in host-side code. */ #pragma once #include "cutlass/blas3.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_view.h" #include "cutlass/gemm/gemm.h" #include "cutlass/arch/mma.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" namespace cutlass { namespace reference { namespace host { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, FillMode FillModeC, typename ScalarType, typename ComputeType, typename InnerProductOp = multiply_add<ComputeType>, typename ConvertOp = NumericConverter<ElementC, ScalarType> > void compute_rank2k( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementC, LayoutC> tensor_d, ComputeType initial_accum) { static_assert( LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, "Tensors must be of rank 2"); static_assert( FillModeC == FillMode::kLower || FillModeC == FillMode::kUpper, "Fill Mode can either be Lower or Upper."); using CompareOp = typename platform::conditional<(FillModeC == FillMode::kLower), std::greater_equal<int>, std::less_equal<int>>::type; // Note: batch is ignored. 
// Note: M is same as N for Rank 2k update int const N = problem_size.n(); int const K = problem_size.k(); // Blocking necessary to speedup reference implementation int const Nblock = 16; ConvertOp convert_op; InnerProductOp inner_product_op; CompareOp compare_op; for (int row_block = 0; row_block < N; row_block += Nblock) { for (int col_block = 0; col_block < N; col_block += Nblock) { ComputeType accum[Nblock][Nblock]; for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Nblock; i++) { accum[i][j] = initial_accum; } } for (int k_block = 0; k_block < K; ++k_block) { for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Nblock; i++) { int row = row_block + i; int col = col_block + j; if (row < N && col < N && compare_op(row, col)) { // A x B^T ElementA a = tensor_a.at(MatrixCoord(row, k_block)); ElementB b_t = tensor_b.at(MatrixCoord(col, k_block)); ComputeType compute_a(cast_if_scalar<ComputeType>(a)); ComputeType compute_b_t(cast_if_scalar<ComputeType>(b_t)); accum[i][j] = inner_product_op(compute_a, compute_b_t, accum[i][j]); // B x A^T ElementB b = tensor_b.at(MatrixCoord(row, k_block)); ElementA a_t = tensor_a.at(MatrixCoord(col, k_block)); ComputeType compute_b(cast_if_scalar<ComputeType>(b)); ComputeType compute_a_t(cast_if_scalar<ComputeType>(a_t)); accum[i][j] = inner_product_op(compute_b, compute_a_t, accum[i][j]); } } } } for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Nblock; i++) { int row = row_block + i; int col = col_block + j; MatrixCoord coord = MatrixCoord(row, col); if (row < N && col < N && ( (FillModeC == FillMode::kLower && row >= col) || (FillModeC == FillMode::kUpper && row <= col) ) ) { tensor_d.at(coord) = convert_op( alpha * ScalarType(accum[i][j]) + beta * ScalarType(tensor_c.at(coord))); } } } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general Rank 2k update (tensors of rank=2) pointed to by TensorRef /// objects. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, FillMode FillModeC, typename ScalarType, typename ComputeType, typename InnerProductOp = multiply_add<ComputeType>, typename ConvertOp = NumericConverter<ElementC, ScalarType> > void compute_rank2k( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, ComputeType initial_accum) { compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC, ScalarType, ComputeType, InnerProductOp, ConvertOp>( problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c, initial_accum); } //////////////////////////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, FillMode FillModeC, typename ScalarType, typename ComputeType, typename InnerProductOp = cutlass::arch::OpMultiplyAdd > struct Rank2K; //////////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for multiply-add template <typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, FillMode FillModeC, typename ScalarType, typename ComputeType> struct Rank2K<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC, ScalarType, ComputeType, arch::OpMultiplyAdd> { void operator()(gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, ComputeType initial_accum = ComputeType(0)) { static_assert( LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, "Tensors must be of rank 2"); compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC, ScalarType, ComputeType, multiply_add<ComputeType>>( problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum); } void operator()(gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementC, LayoutC> tensor_d, ComputeType initial_accum = ComputeType(0)) { static_assert( LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, "Tensors must be of rank 2"); compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC, ScalarType, ComputeType, multiply_add<ComputeType>>( problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace host } // namespace reference } // namespace cutlass
cutlass/tools/util/include/cutlass/util/reference/host/rank_2k.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/host/rank_2k.h", "repo_id": "cutlass", "token_count": 3649 }
73
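Calling this host-side Rank 2k reference only requires rank-2 TensorRefs and a GemmCoord whose M and N extents match. The snippet below is a sketch under the assumption that `cutlass::HostTensor` (already included by this header) owns the storage; the function name `run_reference_syr2k` and the chosen extents are illustrative, not part of the library.

```cpp
// Illustrative host-side usage sketch of the Rank2K reference functor above.
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/rank_2k.h"   // path taken from the metadata above

void run_reference_syr2k(int n, int k, float alpha, float beta) {
  using Layout = cutlass::layout::ColumnMajor;

  cutlass::HostTensor<float, Layout> tensor_a({n, k});   // N x K
  cutlass::HostTensor<float, Layout> tensor_b({n, k});   // N x K
  cutlass::HostTensor<float, Layout> tensor_c({n, n});   // N x N source
  cutlass::HostTensor<float, Layout> tensor_d({n, n});   // N x N destination

  // ... fill A, B, and C on the host ...

  cutlass::reference::host::Rank2K<
      float, Layout,                 // A
      float, Layout,                 // B
      float, Layout,                 // C / D
      cutlass::FillMode::kLower,     // which triangle of C/D is updated
      float,                         // ScalarType (alpha/beta)
      float                          // ComputeType (accumulator)
  > rank2k_reference;

  // Rank 2k update: D = alpha * (A B^T + B A^T) + beta * C; M == N by definition.
  rank2k_reference({n, n, k}, alpha,
                   tensor_a.host_ref(), tensor_b.host_ref(),
                   beta, tensor_c.host_ref(), tensor_d.host_ref(),
                   float(0));
}
```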
# NVIDIA CUTLASS Changelog ## [3.5.0](https://github.com/NVIDIA/cutlass/releases/tag/v3.5.0) (2024-04-09) - Implicit GEMM Convolutions targeting Hopper SM90A via WGMMA + [TMA im2col](./include/cute/atom/copy_traits_sm90_im2col.hpp) + Native implementation in CUTLASS 3.x using CuTe, mirroring the [same design hierarchy as that of GEMMs](./media/docs/gemm_api_3x.md). + Support for 1D, 2D, and 3D convolutions in a [rank-agnostic fashion](./include/cutlass/conv/convnd_problem_shape.hpp). + Support for [Fprop](./test/unit/conv/device_3x/fprop/sm90_conv3d_fprop_implicit_gemm_s8_s8_s32_tensorop_s32.cu), [Dgrad](./test/unit/conv/device_3x/dgrad/sm90_conv2d_dgrad_implicit_gemm_f16_f16_f32_tensorop_f16.cu), and [Wgrad](./test/unit/conv/device_3x/wgrad/sm90_conv1d_wgrad_implicit_gemm_f16_f16_f32_tensorop_f16.cu) algorithms + [CUTLASS profiler support](./python/cutlass_library/conv3x_emitter.py) for 2D and 3D convolutions implemented via the 3.x API. + NOTE: this is a beta release. Further updates to CUTLASS will include major performance improvements, feature enablement, and possible breaking changes to the API until 3.7 release. Your feedback is welcome on the design! - Support for [Ada (SM89) FP8 tensor cores via the 2.x API](./examples/58_ada_fp8_gemm/ada_fp8_gemm.cu). Requires CUDA 12.4 or newer. - [Ampere gather/scatter convolution example](./examples/59_ampere_gather_scatter_gemm/README.md) in CuTe and CUTLASS 3.x + Showcasing how custom kernels can be written and optimized using CUTLASS 3.x and CuTe and the general strategy for implementing convolutions as specializations of GETTs. + Implementation of a coarse grained sparse gather/scatter kernel achieving peak performance on Ampere class tensor cores. - 32x and 16x tile sizes are added to CUTLASS 2.x to improve the performance of narrow-tall and wide-short matrices. + [Ampere FP16 TN](./test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f32_sm80.cu) and [NT](./test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f32_sm80.cu#L227-L301), [Ampere INT8 TN](./test/unit/gemm/device/gemm_s8t_s8n_s8t_tensor_op_s32_sm80.cu#L392-L1342), [Ampere INT4 TN](./test/unit/gemm/device/gemm_s4t_s4n_s4t_tensor_op_s32_sm80.cu#L372-L934). + [Turing FP16 TN](./test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f32_sm75.cu#L55-L394), [Turing INT8 TN](./test/unit/gemm/device/gemm_s8t_s8n_s8t_tensor_op_s32_sm75.cu#L166-L537), [Turing INT4 TN](./test/unit/gemm/device/gemm_s4t_s4n_s4t_tensor_op_s32_sm75.cu#L310-L564). - Updates to CuTe documentation for [`cute::Tensor<>`](./media/docs/cute/03_tensor.md), [MMA atoms](./media/docs/cute/0t_mma_atom.md), and an overhauled [CuTe GEMM tutorial series](./examples/cute/tutorial). - Extensions to CuTe to support [L2 prefetching](./include/cute/algorithm/prefetch.hpp) and [TMA store+reductions](./include/cute/arch/copy_sm90_tma.hpp#L1337). - Remove C++11 requirement on a few CUTLASS 2.x API header files. All CUTLASS files now require C++17. - Fixes to greatly reduce build warnings. - Updates and bugfixes from the community (thanks!) ## [3.4.1](https://github.com/NVIDIA/cutlass/releases/tag/v3.4.1) (2024-02-14) - Statically available [CUTLASS Version macros](./include/cutlass/version.h) that allow for handling API changes between CUTLASS releases on the users' side. - Improvements for Hopper [Group-GEMMs](./examples/57_hopper_grouped_gemm) and [Pointer-Array Batched GEMMs](./examples/56_hopper_ptr_array_batched_gemm). - Updates and bugfixes from the community (thanks!). 
## [3.4.0](https://github.com/NVIDIA/cutlass/releases/tag/v3.4.0) (2024-01-12) * Expanded [Mixed-input Hopper GEMMs](./examples/55_hopper_mixed_dtype_gemm) support covering {16-bit, 8-bit} x {8-bit, 4-bit} input types with fast numerical converters and group scaling factors. * Performance improvements to [Mixed-input Hopper GEMMs](./examples/55_hopper_mixed_dtype_gemm) * Beta release of [Pointer-Array Batched GEMMs](./examples/56_hopper_ptr_array_batched_gemm) now available on Hopper GPUs utilizing TMA and WGMMA (requires CUDA 12.3 or above). * Beta release of [Group-GEMM](./examples/57_hopper_grouped_gemm) utilizing TMA and WGMMA (requires CUDA 12.3 or above). * [Ampere Sparse GEMM](./examples/15_ampere_sparse_tensorop_gemm/ampere_sparse_tensorop_gemm_with_visitor.cu) supports Epilogue Visitor Tree (EVT) now. * NamedBarriers usability improvement and list of [ReservedNamedBarriers](./include/cutlass/arch/barrier.h) has been officially released. * Improved [CuTe documentation](./media/docs/cute/) including improved clarity and depth of [Quickstart](./media/docs/cute/00_quickstart.md), [CuTe Layout](./media/docs/cute/01_layout.md), and [CuTe Layout Algebra](./media/docs/cute/02_layout_algebra.md). Associated code comments, post-conditions, and details in [CuTe Core Unit Tests](./test/unit/cute/core/) also improved. ## [3.3](https://github.com/NVIDIA/cutlass/releases/tag/v3.3.0) (2023-10-31) * [Mixed-input Hopper GEMMs](./examples/55_hopper_mixed_dtype_gemm) support covering 16-bit x 8-bit input operand types. * [Mixed-input Ampere GEMMs](https://github.com/NVIDIA/cutlass/pull/1084) with support for canonical layouts (TN). The implementation supports upcast on operandB {fp16, bf16} x {s8, u8}, and upcast on operandA {s8, u8} x {fp16, bf16}. * [Copy Async based Hopper GEMMs](./test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_alignx_tensor_op_f32_warpspecialized_cooperative.cu) - which support lower than 16B aligned input tensors. * Kernel schedules and Builder support for mixed precision and Copy Async GEMMs with < 16B aligned input tensors. * Profiler support for lower-aligned Hopper GEMMs. * Performance Improvements to [Scatter-Gather Hopper Example](./examples/52_hopper_gather_scatter_fusion). * Sub-Byte type fixes and improvements. * EVT Support for RELU with Aux bitmap tensor store (used in dRELU). See [SM90 EVT fusions](./include/cutlass/epilogue/fusion/sm90_visitor_compute_tma_warpspecialized.hpp) for details. * Fusion support for backprop fusions including drelu, dgelu, and dbias. * Support for void-C kernels and SM80 mixed-input GEMMs in the CUTLASS Python interface ## [3.2.2](https://github.com/NVIDIA/cutlass/releases/tag/v3.2.2) (2023-10-25) * Minor patch for issue/1138 ## [3.2.1](https://github.com/NVIDIA/cutlass/releases/tag/v3.2.1) (2023-09-22) * Python support SM90 Epilogue Visitor Tree (EVT) on top of the C++ support released in 3.2.0. * SM80 EVT support in C++ and Python. * Other SM90 epilogue improvements. * Splitting CUTLASS library into smaller units based on operation, arch and datatypes. See [1105](https://github.com/NVIDIA/cutlass/discussions/1105) for details. * Making `tools/library/scripts` packageable - `tools/library/scripts` is now moving to `python/cutlass_library`. See the Python [README](./python/README.md) for details. * SM90 TF32 kernel improvements for all layouts. * SM90 rasterization direction support in the CUTLASS profiler. * Improvement for CUTLASS profiler build times. * Remove Python-C++ bindings. 
## [3.2.0](https://github.com/NVIDIA/cutlass/releases/tag/v3.2.0) (2023-08-03) * New warp-specialized persistent FP8 GEMM kernel [kernel schedules](./include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp) and [mainloops](./include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized_fp8.hpp) targeting Hopper architecture that achieve great performance with TMA, WGMMA, and threadblock clusters. An example showcasing [Hopper warp-specialized FP8 GEMMs](./examples/54_hopper_fp8_warp_specialized_gemm). FP8 GEMMs come with a fast accumulation mode. When enabled, problem execution might be faster but at the cost of lower accuracy because intermediate results will not periodically be promoted to a higher precision. * New [Epilogue Visitor Tree (EVT)](./examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu) support for Hopper TMA epilogues. EVTs allows for user-defined customized epilogue fusion patterns without having to write a new epilogue. * [Stream-K](./include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp) feature for Hopper. Note that this is only a functional implementation of stream-K, and should not be used for performance comparison. Optimizations are expected in a future release. * Improved CTA rasterization and support for CTA swizzling for Hopper kernels using the [Tile Scheduler](./include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp). * Improved performance for [warp-specialized TensorFloat-32 (TF32) GEMM kernels](test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32_gmma_rs_cluster_warpspecialized.cu) targeting Hopper TMA. * [Hopper GEMM+Permute](./examples/53_hopper_gemm_permute/53_hopper_gemm_permute.cu), an example of fusing tensor reordering (permutation) with GEMM mainloop or epilogue. * New CUTLASS 2D Convolution Python interface. New [example](./examples/python/03_basic_conv2d.ipynb) here. * Support for Windows (MSVC) builds. Tested with Visual Studio 2019 v16.11.27 on Windows 10.0. * Optimal performance using [**CUDA 12.2u1**](https://developer.nvidia.com/cuda-downloads) * Updates and bugfixes from the community (thanks!) ## [3.1.0](https://github.com/NVIDIA/cutlass/releases/tag/v3.1.0) (2023-04-14) * New CUTLASS Python interface that aims to provide an ease-of-use interface for instantiating, emitting, compiling, and running CUTLASS kernels via Python. More details [here](./python/README.md) and new [examples](./examples/python). * New [efficient epilogues](test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative.cu#L783) using TMA for Hopper. * Support for [fused epilogues](test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_bias_elementwise.cu), such Bias, ReLU and GELU, using the new efficient epilogues. * New [warp-specialized TensorFloat-32 (TF32) GEMM kernels](test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32_gmma_rs_cluster_warpspecialized.cu) targeting Hopper TMA. * New [*warp-specialized persistent cooperative*](./include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp) kernel design that allows for larger tile sizes and improves performance on Hopper. * An [example](./examples/51_hopper_gett) showcasing GEMM-Like Tensor-Tensor Contraction (GETT) capability on Hopper. * Epilogue builders. 
Similar to mainloop builders (see [example 49](./examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu)), epilogue builders aim to generate the best-possible epilogue while exposing incremental opt-ins for greater customization. * Profiler support for overriding kernel and epilogue builder auto schedules for 3.x API kernels, allowing specific policies to be run in the CUTLASS profiler. * Performance optimizations for the [*warp-specialized persistent ping-pong*](./include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp) kernel. * Changes to the [GEMM API 3.x](./media/docs/gemm_api_3x.md), involving the host-facing arguments and the underlying `Params` structs. * [FMHA Backward Pass](./examples/41_fused_multi_head_attention/fused_multi_head_attention_backward.cu) from Meta xFormers. * [Streamk GEMM with Broadcast](./examples/47_ampere_gemm_universal_streamk/ampere_gemm_universal_streamk_broadcast.cu) enables epilogue broadcast with StreamK GEMM. * [Batched B2B GEMM](./examples/13_two_tensor_op_fusion) now can run multiple Back-to-Back GEMM with the same problem size in parallel. * [Batched Strided GEMV](test/unit/gemm/device/gemv.cu) support both row major and column major input matrix. * [Permute + GEMM fusion](./examples/39_gemm_permute) can fuse Permute with following GEMM now. Before, we only support fusing GEMM with Permute in the epilogue. * [Row Broadcast](./include/cutlass/epilogue/threadblock/predicated_tile_iterator_row_broadcast.h) can be fused in the epilogue. * The GitHub branch is renamed from `master` to `main` in this release. * Optimal performance using [**CUDA 12.1**](https://developer.nvidia.com/cuda-downloads) * Updates and bugfixes from the community (thanks!) ## [3.0.0](https://github.com/NVIDIA/cutlass/releases/tag/v3.0.0) (2023-01-23) * [CuTe](./media/docs/cute/00_quickstart.md), a [new core library and backend](./include/cute) for CUTLASS 3.0 that defines a single Layout vocabulary type and an associated algebra of layouts for a much more expressive and composable abstraction for tensors, sets of parallel agents, and operations by said agents on tensors. * [A new conceptual operation hierarchy](./media/docs/cutlass_3x_design.md) that replaces the architecture-centric hierarchy of CUTLASS 2.x and [documentation for CUTLASS 3.0's GEMM API changes](./media/docs/gemm_api_3x.md). * Strict API backwards compatibility that exposes both 2.x and 3.x API kernels through the same [`device::GemmUniversalAdapter`](./include/cutlass/gemm/device/gemm_universal_adapter.h) and [`kernel::GemmUniversal`](./include/cutlass/gemm/kernel/gemm_universal.hpp) types, allowing users to include both APIs in the same translation units. More information can be found in the [3.x backwards compatibility section](./media/docs/cutlass_3x_backwards_compatibility.md). * Updates to [Functionality](./media/docs/functionality.md) which directs users on which kernels are supported via CUTLASS-2 and CUTLASS-3. * Updates to [Compatibility](./README.md#compatibility) Section regarding supported compilers, operating systems, CUDA Toolkits, Hardware Architectures and [Target Architecture](./README.md#Target-Architecture). * New warp-specialized GEMM [kernel schedules](./include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp) and [mainloops](./include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized.hpp) targeting Hopper architecture that achieve great performance with TMA, WGMMA, and threadblock clusters. 
* Extensions to CUTLASS profiler to support threadblock cluster shapes in library and profiler tile configurations. * [CUTLASS library integration](./tools/library/src/gemm_operation_3x.hpp) for 3.x API kernels built through the new `CollectiveBuilder` API, enabling CUTLASS profiler. * Support for [Hopper GEMMs](./examples/48_hopper_warp_specialized_gemm) through the new 3.0 API with CuTe-based exposure of the Hopper [Tensor Memory Accelerator](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor) and [WGMMA Tensor Core](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#asynchronous-warpgroup-level-matrix-instructions) features. * Set of examples that demonstrate the usage of the new 3.0 API to easily build GEMM kernels targeting Hopper: examples [48](./examples/48_hopper_warp_specialized_gemm), [49](./examples/49_hopper_gemm_schedules_with_collective_builder), and [50](./examples/50_hopper_gemm_with_epilogue_swizzle). ## [2.11.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.11.0) (2022-11-19) * [Stream-K](./examples/47_ampere_gemm_universal_streamk), which is a new general way to do split-K. It can not only improve performance, but can also significantly reduce the number of tile sizes that need to be profiled to find the best one. * [Fused multi-head attention Kernel](./examples/41_fused_multi_head_attention). It has two variants: one uses batched GEMM for fixed sequence lengths, and the other uses grouped GEMM for variable sequence lengths. Both variants need only one kernel. * [Dual GEMM](./examples/45_dual_gemm), which can fuse A x B and A x C into one kernel. The two GEMMs have no producer-consumer dependency. * Hopper improves [double precision matrix multiplication](./test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu) by 2x compared to Ampere at iso-clocks. It is supported since CUDA 11.8. * [BLAS3](./test/unit/gemm/device/hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu) functions with Hopper's new double precision matrix multiplication instructions. * [ELL Block Sparse GEMM](./examples/43_ell_block_sparse_gemm), which uses an [ELL matrix](https://developer.nvidia.com/blog/accelerating-matrix-multiplication-with-block-sparse-format-and-nvidia-tensor-cores/) to describe the sparsity of the A matrix. B and output matrices are still dense. The block size can be arbitrary. * Optimized [Group Conv](./examples/42_ampere_tensorop_group_conv) for SingleGroup mode, which requires that the output channel count per group is a multiple of Threadblock tile N. * [Optimized DepthWise Conv](./examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu). Two new modes are added: * [kOptimized](./test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) - uses direct convolution to compute the result instead of implicit GEMM. * The restrictions are: 1) input channel, output channel, and group number should be multiples of (128 / sizeof(input element)). 2) The input filter size should be the same as the template parameter configuration. * [kFixedStrideDilation](./test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_fixed_stride_dilation_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) - which puts stride and dilation into templates to further improve performance. In this mode, the kernel keeps some inputs persistent in registers to squeeze out more performance, so large filter/stride/dilation values are not recommended. 
* The restrictions are: 1) input channel, output channel, and group number should be multiples of (128 / sizeof(input element)). 2) input filter size, stride, and dilation should be the same as the template parameter configuration. * [Scripts](./examples/44_multi_gemm_ir_and_codegen) to fuse multiple back-to-back GEMMs. Its implementation was discussed in a GTC'22 Spring [talk](https://www.nvidia.com/en-us/on-demand/session/gtcspring22-s41606/). * [FP8 data type definition](./include/cutlass/float8.h) and [conversion routines](./include/cutlass/numeric_conversion.h#L1274-2115). * Updates and bugfixes from the community (thanks!). Big shout out to Meta's [xFormers](https://github.com/facebookresearch/xformers). * **Deprecation announcement:** CUTLASS plans to deprecate the following: * Maxwell and Pascal GPU architectures * Ubuntu 16.04 * CUDA 10.2 ## [2.10.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.10.0) (2022-08-23) * [CUTLASS Python](./examples/40_cutlass_py) now supports GEMM, CONV, and Grouped GEMM for different data types as well as different epilogue flavours. * Optimizations for CUTLASS's [Grouped GEMM](./examples/24_gemm_grouped/gemm_grouped.cu) kernel. The threadblock scheduling has been improved, and some computation can be moved to the host side where applicable. [Grouped Syr2k](./examples/38_syr2k_grouped/syr2k_grouped.cu) kernels are added, too. * Optimizations for [GEMM+Softmax](./examples/35_gemm_softmax). All the reduction computation is fused into the previous GEMM. More template arguments are provided to fine-tune the performance. * [Grouped GEMM for Multihead Attention](./examples/41_multi_head_attention). This general grouped-GEMM-based MHA does not require the sequence lengths of all GEMMs to be the same, which makes it most useful for natural language processing. * [GEMM + Layer norm fusion for Ampere](./examples/37_gemm_layernorm_gemm_fusion/) splits the layernorm into two parts, which can be fused into the GEMMs before and after, respectively. In addition to using the sum of squares to compute the layernorm variance, [Shift-K](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data) is provided in case the sum of squares raises numerical issues. * [GEMM Epilogue Permutation Fusion](./examples/39_gemm_permute) can apply a user-provided permutation layout mapping in the GEMM epilogue. * [Grouped convolution targeting implicit GEMM](test/unit/conv/device/group_conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu) introduces the first group convolution implementation to CUTLASS. It is an Analytical implementation, not an Optimized one. The restrictions are: 1) the input and output channel counts should be multiples of the group number. 2) split-K is not supported. The implementation has 2 modes: * kSingleGroup: the output channel count per group is a multiple of Threadblock tile N. * kMultipleGroup: Threadblock tile N is a multiple of the output channel count per group. * [Depthwise separable convolution](test/unit/conv/device/depthwise_conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) introduces the first depthwise convolution, which is also Analytical for now. The restrictions are: 1) SIMT only 2) no split-K 3) the input channel count equals the output channel count equals the group number. * Standalone [Layernorm](./tools/util/include/cutlass/util/device_layernorm.h) and [Pooling](./tools/util/include/cutlass/util/device_nhwc_pooling.h) kernels. 
* [Back-to-back GEMM/CONV](./examples/13_two_tensor_op_fusion) relaxes the requirement that the first GEMM K dimension needs to be the multiple of Threadblock Tile K dimension. * Optimal performance using [**CUDA 11.6u2**](https://developer.nvidia.com/cuda-downloads) * Updates and bugfixes from the community (thanks!) ## [2.9.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.9.0) (2022-04-21) * [First layer Convolution kernels](./test/unit/conv/device/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu) specialized for small channel counts and reduced alignment * [Few channels](./include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h) specialization for reduced alignment capabilities * [Fixed channels](./include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h) further specialized when channel count perfectly matches the access vector size * [Unit tests](./test/unit/conv/device/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu) * [Python-based instance emitter](./python/cutlass_library/generator.py) in the CUTLASS Library and support in the Profiler * [BLAS3](https://docs.nvidia.com/cuda/cublas/index.html#cublas-level-3-function-reference) operators accelerated by Tensor Cores * Supported types: f32, cf32, f64, cf64, tf32x3, complex tf32x3 * [HERK](./test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_fast_f32_sm80.cu) with [emitter](./python/cutlass_library/rank_k_operation.py) * [SYRK](./test/unit/gemm/device/syrk_f32n_f32t_tensor_op_fast_f32_sm80.cu) with [emitter](./python/cutlass_library/rank_k_operation.py) * [SYMM](./test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu) with [emitter](./python/cutlass_library/symm_operation.py) * [TRMM](./test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_ls_sm80.cu) with [emitter](./python/cutlass_library/trmm_operation.py) * [Unit tests](./test/unit/gemm/device/testbed_rank_k_universal.h) * [CUTLASS Python](./examples/40_cutlass_py) demonstrating JIT compilation of CUTLASS kernels and a Python-based runtime using [CUDA Python](https://developer.nvidia.com/cuda-python) * [Python-based runtime](./tools/library/scripts/rt.py) interoperable with existing emitters * [GEMM + Softmax example](./examples/35_gemm_softmax) * [Gather and Scatter Fusion with GEMM](./examples/36_gather_scatter_fusion) can gather inputs and scatters outputs based on indices vectors in the same GEMM kernel. * It can select random rows in a row major matrix. * It can select random columns in a column major matrix. * [Back-to-back GEMM/CONV](./examples/13_two_tensor_op_fusion) fully supports buffering the first GEMM/CONV results in the shared memory for the latter one to use. It can eliminate register spill when the tile size is big. Additionally, bias vector add is supported in the first GEMM/CONV. * Supported kernels: GEMM and CONV. * Supported types: fp16 and int8. * Supported architectures: Turing and Ampere. * [Transposed Convolution](./examples/34_transposed_conv2d) (a.k.a Deconvolution) support which reuses Dgrad implementation. * [Utility functions](./tools/util/include/cutlass/util) that can pad NHWC and convert between NCHW and NHWC. * [Small alignment implicit gemm](https://github.com/NVIDIA/cutlass/issues/242) support for Fprop/Dgrad/Wgrad so that padding is no longer mandated to use tensor cores in these kernels. * Epilogue enhancement: * Eliminate bank conflicts in int8 tensor core kernels. 
* Half2 usage if epilogue compute type is fp16. * More activation functions: SiLU, Hardswish, Leaky ReLU. * New elementwise fusion pattern for [residual block](./include/cutlass/epilogue/thread/linear_combination_residual_block.h). * [Group GEMM](./examples/24_gemm_grouped) thread block count calculation fix, which helps launch the intended number of threadblocks to fully occupy the GPUs. * [Parallel GEMM split-K](https://github.com/NVIDIA/cutlass/pull/277) support in the CUTLASS profiler. * Optimal performance using [**CUDA 11.6u2**](https://developer.nvidia.com/cuda-downloads) * Updates and bugfixes from the community (thanks!) ## [2.8.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.8.0) (2021-11-19) * **TF32x3:** emulated single-precision using Tensor Cores * 45+ TFLOPs on NVIDIA A100 * [GEMM SDK example](./examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu) (real) * [COMPLEX GEMM SDK example](./examples/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm/29_3xtf32_complex_gemm.cu) (complex) * [Implicit GEMM Convolution SDK example](./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/ampere_3xtf32_fast_accurate_tensorop_fprop.cu) * **Mainloop fusion for Convolution:** convolution with fused per-channel scale-bias-relu * [Conv Fprop SDK example](./examples/25_ampere_fprop_mainloop_fusion/ampere_fprop_mainloop_fusion.cu) * [Conv WGrad SDK example](./examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu) * [cutlass::conv::device::ImplicitGemmConvolutionFusion](./include/cutlass/conv/device/implicit_gemm_convolution_fusion.h) * **Grouped GEMM:** similar to batched GEMM with a distinct problem size per group * [SDK example](./examples/24_gemm_grouped) with performance comparison against Batched Strided GEMM * [cutlass::gemm::device::GemmGrouped](./include/cutlass/gemm/device/gemm_grouped.h) * [Implicit GEMM Convolution fusion](./examples/13_two_tensor_op_fusion/) supports staging the first convolution's output accumulator in shared memory on Turing. This allows more flexible warp tile sizes and lower register pressure. * Optimal performance using [**CUDA 11.5**](https://developer.nvidia.com/cuda-downloads) * Updates from the community (thanks!) * **Deprecation announcement:** CUTLASS plans to deprecate the following: * Maxwell and Pascal GPU architectures * Ubuntu 16.04 * CUDA 10.2 ## [2.7.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.7.0) (2021-09-24) * Mainloop fusion for GEMM: [summation over A or B](./examples/23_ampere_gemm_operand_reduction_fusion/ampere_gemm_operand_reduction_fusion.cu) * [Strided DGRAD (optimized iterators)](./include/cutlass/conv/kernel/default_conv2d_dgrad.h) * [Half-precision GELU_taylor activation functions](./include/cutlass/epilogue/thread/activation.h#L196) * Use these when accumulation and epilogue compute types are all `cutlass::half_t` * Tuning and bug fixes to [fused GEMM + GEMM example](./examples/13_two_tensor_op_fusion/) * Support for smaller than 128b aligned Convolutions: [see examples](test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu#L272) * Caching of results to accelerate Convolution [unit tests](test/unit/conv/device/cache_testbed_output.h) * Can be enabled or disabled by running `cmake .. -DCUTLASS_TEST_ENABLE_CACHED_RESULTS=OFF` * Corrections and bug fixes reported by the CUTLASS community * Thank you for filing these issues! 
## [2.6.1](https://github.com/NVIDIA/cutlass/releases/tag/v2.6.1) (2021-09-03) * Arbitrary padding and striding for CUTLASS Strided DGRAD Convolution operator (Analytic Iterators) * Tuning for GEMMs fused with partial reductions * Corrections and bug fixes reported by the CUTLASS community * Thank you for filing these issues! ## [2.6.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.6.0) (2021-07-22) * Optimal performance when compiled with the [CUDA 11.4 Toolkit](https://developer.nvidia.com/cuda-toolkit) * Adopt the new L2 prefetch feature in [cp.async](./include/cutlass/arch/memory.h) and [global load](./include/cutlass/arch/memory_sm80.h) * Fused operators with GEMM and Convolution * [Fused broadcast in epilogue](test/unit/gemm/device/gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu) * [Fused partial reduction in epilogue](./test/unit/gemm/device/gemm_with_reduction_f16n_f16n_f16n_tensorop_f32_sm75.cu) * 64b tensor strides and leading dimensions support for GEMMs * Affine rank=2 matrix layouts * Row stride and column stride for matrices using [cutlass::layout::AffineRank2](./include/cutlass/layout/matrix.h) * Support [FP64 tensor core](./examples/18_ampere_fp64_tensorop_affine2_gemm/ampere_fp64_tensorop_affine2_gemm.cu) and SIMT GEMM. * [Batched GEMV](./test/unit/gemm/device/gemv.cu) preview implementation * [New strided Dgrad](test/unit/conv/device/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) implementation * Accelerates over previous implementation by cutting down redundant math by 4x * Support using new `Dy` and `w` analytic iterators and existing `cutlass::conv::device::ImplicitGemmConvolution` interface * Quaternion-valued GEMM and Convolution in single- and double-precision (targeting CUDA Cores) * Updates to [quaternion.h](./include/cutlass/quaternion.h) and [functional.h](./include/cutlass/functional.h) * SDK Example for [GEMM](./examples/21_quaternion_gemm/quaternion_gemm.cu) and [Convolution](./examples/22_quaternion_conv/quaternion_conv.cu) * [Unit tests for GEMM](./test/unit/gemm/device/simt_qgemm_nn_sm50.cu) and [Convolution](./test/unit/conv/device/conv2d_fprop_implicit_gemm_qf32nhwc_qf32nhwc_qf32nhwc_simt_f32_sm50.cu) * Many improvements to the epilogue. * Provide an [option](./include/cutlass/epilogue/threadblock/epilogue.h) to not fully unroll the epilogue to reduce the code size and improve the performance when using complicated elementwise operations * Performance improvement for FP16 tensor core kernels * Bug fixes * Enhanced Clang support and the combination of Clang 13 and CUDA 11.4 can build and run kernels from Pascal and Ampere. * Updated minimum CUDA Toolkit requirement to 10.2 * [CUDA 11.4 Toolkit](https://developer.nvidia.com/cuda-toolkit) recommended * Corrections and bug fixes reported by the CUTLASS community * Thank you for filing these issues! 
## [2.5.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.5.0) (2021-02-26) * Tensor reductions * _m_-to-_n_ reductions of tensors with affine layout * [Specializations](./test/unit/reduction/device/tensor_reduce_contiguous.cu) for reductions including contiguous dimension * [Specializations](./test/unit/reduction/device/tensor_reduce_strided.cu) for reductions excluding contiguous dimension * Custom reduction functors such as `cutlass::logical_and` * Large tensor support, up to 2^63 elements (however, each dimension is limited to an extent of 2^31) * Optimizations for 3-D convolution * [Optimized tile iterators](./include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h) using precomputed delta table for 3-D convolution * Full coverage of [forward](test/unit/conv/device/conv3d_fprop_implicit_gemm_f16ndhwc_f16ndhwc_f32ndhwc_tensor_op_f32_sm80.cu) and [backwards](test/unit/conv/device/conv3d_dgrad_implicit_gemm_f16ndhwc_f16ndhwc_f32ndhwc_tensor_op_f32_sm80.cu) passes for 3D convolution * [Fused Convolution+Convolution example](./examples/13_two_tensor_op_fusion/README.md) * Corrections and bug fixes reported by the CUTLASS community * Thank you for filing these issues! ## [2.4.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.4.0) (2020-11-19) * Implicit GEMM convolution kernels supporting CUDA and Tensor Cores on NVIDIA GPUs * Operators: forward (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad) convolution * Data type: FP32, complex<FP32>, Tensor Float 32 (TF32), BFloat16 (BF16), Float16, Int4, Int8, Int32 * Spatial dimensions: 1-D, 2-D, and 3-D * Layout: NHWC, NCxHWx * Implicit GEMM convolution components: * Global memory iterators supporting Fprop, Dgrad, and Wgrad * `MmaMultistage` for implicit GEMM convolution for NVIDIA Ampere architecture * `MmaPipeline` for implicit GEMM convolution for NVIDIA Volta and Turing architectures * [Documentation](./media/docs/implicit_gemm_convolution.md) describing Implicit GEMM Convolution algorithm and implementation ## [2.3.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.3.0) (2020-09-23) * [NVIDIA Ampere Architecture features](https://devblogs.nvidia.com/nvidia-ampere-architecture-in-depth/) * [Sparse Tensor Core GEMM kernels](test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu): * Direct access to Sparse Tensor Cores and maximum performance via [`mma.sp.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma-and-friends) * Fast SGEMM targeting GeForce RTX 30-series CUDA Cores * Minor Features: * [Activation functions](./include/cutlass/epilogue/thread/activation.h) such as [GeLU](./include/cutlass/epilogue/thread/linear_combination_gelu.h) and [Sigmoid](./include/cutlass/epilogue/thread/linear_combination_sigmoid.h) * Small [matrix](./include/cutlass/matrix.h) and [quaternion](./include/cutlass/quaternion.h) template classes in device code * [Floating-point constants](./include/cutlass/constants.h) * NVIDIA Ampere GPU Architecture examples and documentation: * [Tensor Float 32](./examples/14_ampere_tf32_tensorop_gemm/ampere_tf32_tensorop_gemm.cu) and * [Sparse Tensor Cores](./examples/15_ampere_sparse_tensorop_gemm/ampere_sparse_tensorop_gemm.cu) * Documentation added on CUTLASS [efficient row-major epilogue](./media/docs/gemm_api.md#efficient-epilogue) ## [2.2.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.2.0) (2020-06-08) * [NVIDIA Ampere Architecture 
features](https://devblogs.nvidia.com/nvidia-ampere-architecture-in-depth/) * Fast Tensor Core operations: * Maximum performance via [`mma.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma-and-friends) * Tensor Float 32, BFloat16, and double-precision data types * Mixed integer data types (int8, int4, bin1) * Asynchronous copy for deep software pipelines via [`cp.async`](https://docs.nvidia.com/cuda/parallel-thread-execution) * Described in [GTC 2020 Webinar (SR 21745)](https://developer.nvidia.com/gtc/2020/video/s21745) (free registration required) * Features: * SDK examples showing GEMM fused with bias+relu and fused GEMM+GEMM * Complex-valued GEMMs targeting NVIDIA Ampere Tensor Cores in double-precision and Tensor Float 32 * Gaussian complex GEMMs using 3m complex multiply algorithm * Universal GEMM kernel supporting two batch modes and two algorithms for parallel reductions * Policy updates: * [CUDA 11 Toolkit](https://developer.nvidia.com/cuda-toolkit) needed to enable NVIDIA Ampere Architecture features * Disabled F16C by default for compatibility - enable on cmake command line with `-DCUTLASS_ENABLE_F16C=ON` ## [2.1.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.1.0) (2020-04-06) * BLAS-style host-side API added to [CUTLASS Library](./media/docs/quickstart.md#cutlass-library) * API to launch compiled kernel instances for GEMM and planar complex GEMM * Planar Complex GEMM kernels targeting Volta and Turing Tensor Cores * Computes complex matrix products on matrices stored as disjoint real and imaginary parts * [SDK Examples of Planar Complex GEMMs](./examples/10_planar_complex/planar_complex.cu) * Minor enhancements and bug fixes ## [2.0.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.0.0) (2019-11-19) * Substantially refactored for * Better performance, particularly for native Turing Tensor Cores * Robust and durable templates spanning the design space * Encapsulated functionality embodying modern C++11 programming techniques * Optimized containers and data types for efficient, generic, portable device code * Updates to: * [Quick start guide](./media/docs/quickstart.md) * [Documentation](./README.md#documentation) * [Utilities](./media/docs/utilities.md) * [CUTLASS Profiler](./media/docs/profiler.md) * Native Turing Tensor Cores * Efficient GEMM kernels targeting Turing Tensor Cores * Mixed-precision floating point, 8-bit integer, 4-bit integer, and binarized operands * Coverage of existing CUTLASS functionality * GEMM kernels targeting CUDA and Tensor Cores in NVIDIA GPUs * Volta Tensor Cores through native mma.sync and through WMMA API * Optimizations such as parallel reductions, threadblock rasterization, and intra-threadblock reductions * Batched GEMM operations * Complex-valued GEMMs * **Note: a host compiler supporting C++11 or greater is required.** # CUTLASS 1.x ## [1.3.2](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.2) (2019-07-09) * Performance improvement for Volta Tensor Cores TN and TT layouts. ## [1.3.1](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.1) (2019-04-09) * Corrected NVRTC unit tests. ## [1.3.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.0) (2019-03-20) * Efficient GEMM kernel targeting Volta Tensor Cores via `mma.sync` instruction added in CUDA 10.1. 
## [1.2.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.2.0) (2018-10-26) * Parallelized reductions across threadblocks ("Split-K") * Improved IGEMM performance * Batched strided WMMA GEMMs ## [1.1.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.1.0) (2018-09-19) * Turing Features * WMMA GEMM targeting TensorCores - INT8, INT4, 1-bit * Batched Strided GEMM * Threadblock rasterization strategies * Improved performance for adverse problem sizes and data layouts * Extended CUTLASS Core comonents * Tensor views support arbitrary matrix and tensor layouts * Zip iterators for structuring multiple data streams * Enhanced CUTLASS utilities * Reference code for tensor operations in host and device code * Added HostMatrix<> for simplified matrix creation * Examples * Basic GEMM, tensor views, CUTLASS utilities, batched GEMM, WMMA GEMM ## [1.0.1](https://github.com/NVIDIA/cutlass/releases/tag/v1.0.1) (2018-06-11) * Intra-threadblock reduction added for small threadblock tile sizes * sgemm_64x128x16, sgemm_128x128x16, sgemm_128x64x16, sgemm_128x32x16, sgemm_64x64x16, sgemm_64x32x16 * igemm_32x32x128 * GEMM _K_ residue handled during prologue prior to mainloop * Replaced Google Test copy with submodule. Use `git submodule init --recursive --update` ## [1.0.0](https://github.com/NVIDIA/cutlass/commit/2028ebe120aab22bfd0b2baf8902d4c9627eb33f) (2018-05-16) * Substantial rewrite to accommodate new architecture * Kernels: SGEMM, DGEMM, IGEMM, HGEMM, WMMA GEMM * Unit and performance tests ## [0.0.1](https://github.com/NVIDIA/cutlass/commit/d08ba8ac46e2fa3f745e070c390182edb56b2e91) (2017-12-04) * Initial release ## Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
cutlass/CHANGELOG.md/0
{ "file_path": "cutlass/CHANGELOG.md", "repo_id": "cutlass", "token_count": 13977 }
0
var searchData= [ ['enable_5fif',['enable_if',['../structcutlass_1_1platform_1_1enable__if.html',1,'cutlass::platform']]], ['enable_5fif_3c_20false_2c_20t_20_3e',['enable_if&lt; false, T &gt;',['../structcutlass_1_1platform_1_1enable__if_3_01false_00_01T_01_4.html',1,'cutlass::platform']]], ['enablemma_5fcrow_5fsm60',['EnableMma_Crow_SM60',['../structcutlass_1_1gemm_1_1thread_1_1detail_1_1EnableMma__Crow__SM60.html',1,'cutlass::gemm::thread::detail']]], ['epilogue',['Epilogue',['../classcutlass_1_1epilogue_1_1threadblock_1_1Epilogue.html',1,'cutlass::epilogue::threadblock']]], ['epiloguebase',['EpilogueBase',['../classcutlass_1_1epilogue_1_1threadblock_1_1EpilogueBase.html',1,'cutlass::epilogue::threadblock']]], ['epilogueworkspace',['EpilogueWorkspace',['../classcutlass_1_1epilogue_1_1EpilogueWorkspace.html',1,'cutlass::epilogue']]] ];
cutlass/docs/search/classes_4.js/0
{ "file_path": "cutlass/docs/search/classes_4.js", "repo_id": "cutlass", "token_count": 373 }
1
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example demonstrates how to call a CUTLASS GEMM kernel and provides a naive reference matrix multiply kernel to verify its correctness. The CUTLASS Gemm template is instantiated in the function CutlassSgemmNN. This is kernel computes the general matrix product (GEMM) using single-precision floating-point arithmetic and assumes all matrices have column-major layout. The threadblock tile size is chosen as 128x128x8 which offers good performance for large matrices. See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available in CUTLASS. https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/ Aside from defining and launching the SGEMM kernel, this example does not use any other components or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are prevalent in the CUTLASS unit tests. This example has delibrately been kept similar to the basic_gemm example from cutlass-1.3 to highlight the minimum amount of differences needed to transition to cutlass-2.0. Cutlass-1.3 sgemm: https://github.com/NVIDIA/cutlass/blob/master/examples/00_basic_gemm/basic_gemm.cu */ // Standard Library includes #include <iostream> #include <sstream> #include <vector> // Helper methods to check for errors #include "helper.h" // // CUTLASS includes needed for single-precision GEMM kernel // // Defines cutlass::gemm::device::Gemm, the generic Gemm computation template class. #include "cutlass/gemm/device/gemm.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // // This function defines a CUTLASS GEMM kernel instantiation, constructs its parameters object, // and launches it on the CUDA device. 
// /////////////////////////////////////////////////////////////////////////////////////////////////// /// Define a CUTLASS GEMM template and launch a GEMM kernel. cudaError_t CutlassSgemmNN( int M, int N, int K, float alpha, float const *A, int lda, float const *B, int ldb, float beta, float *C, int ldc) { // Define type definition for single-precision CUTLASS GEMM with column-major // input matrices and 128x128x8 threadblock tile size (chosen by default). // // To keep the interface manageable, several helpers are defined for plausible compositions // including the following example for single-precision GEMM. Typical values are used as // default template arguments. See `cutlass/gemm/device/default_gemm_configuration.h` for more details. // // To view the full gemm device API interface, see `cutlass/gemm/device/gemm.h` using ColumnMajor = cutlass::layout::ColumnMajor; using CutlassGemm = cutlass::gemm::device::Gemm<float, // Data-type of A matrix ColumnMajor, // Layout of A matrix float, // Data-type of B matrix ColumnMajor, // Layout of B matrix float, // Data-type of C matrix ColumnMajor>; // Layout of C matrix // Define a CUTLASS GEMM type CutlassGemm gemm_operator; // Construct the CUTLASS GEMM arguments object. // // One of CUTLASS's design patterns is to define gemm argument objects that are constructible // in host code and passed to kernels by value. These may include pointers, strides, scalars, // and other arguments needed by Gemm and its components. // // The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible // arguments to kernels and (2.) minimized initialization overhead on kernel entry. // CutlassGemm::Arguments args({M , N, K}, // Gemm Problem dimensions {A, lda}, // Tensor-ref for source matrix A {B, ldb}, // Tensor-ref for source matrix B {C, ldc}, // Tensor-ref for source matrix C {C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix) {alpha, beta}); // Scalars used in the Epilogue // // Launch the CUTLASS GEMM kernel. // cutlass::Status status = gemm_operator(args); // // Return a cudaError_t if the CUTLASS GEMM operator returned an error code. // if (status != cutlass::Status::kSuccess) { return cudaErrorUnknown; } // Return success, if no errors were encountered. return cudaSuccess; } /////////////////////////////////////////////////////////////////////////////////////////////////// // // The source code after this point in the file is generic CUDA using the CUDA Runtime API // and simple CUDA kernels to initialize matrices and compute the general matrix product. // /////////////////////////////////////////////////////////////////////////////////////////////////// /// Kernel to initialize a matrix with small integers. __global__ void InitializeMatrix_kernel( float *matrix, int rows, int columns, int seed = 0) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < rows && j < columns) { int offset = i + j * rows; // Generate arbitrary elements. int const k = 16807; int const m = 16; float value = float(((offset + seed) * k % m) - m / 2); matrix[offset] = value; } } /// Simple function to initialize a matrix to arbitrary small integers. 
cudaError_t InitializeMatrix(float *matrix, int rows, int columns, int seed = 0) { dim3 block(16, 16); dim3 grid( (rows + block.x - 1) / block.x, (columns + block.y - 1) / block.y ); InitializeMatrix_kernel<<< grid, block >>>(matrix, rows, columns, seed); return cudaGetLastError(); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Allocates device memory for a matrix then fills with arbitrary small integers. cudaError_t AllocateMatrix(float **matrix, int rows, int columns, int seed = 0) { cudaError_t result; size_t sizeof_matrix = sizeof(float) * rows * columns; // Allocate device memory. result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix); if (result != cudaSuccess) { std::cerr << "Failed to allocate matrix: " << cudaGetErrorString(result) << std::endl; return result; } // Clear the allocation. result = cudaMemset(*matrix, 0, sizeof_matrix); if (result != cudaSuccess) { std::cerr << "Failed to clear matrix device memory: " << cudaGetErrorString(result) << std::endl; return result; } // Initialize matrix elements to arbitrary small integers. result = InitializeMatrix(*matrix, rows, columns, seed); if (result != cudaSuccess) { std::cerr << "Failed to initialize matrix: " << cudaGetErrorString(result) << std::endl; return result; } return result; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Naive reference GEMM computation. __global__ void ReferenceGemm_kernel( int M, int N, int K, float alpha, float const *A, int lda, float const *B, int ldb, float beta, float *C, int ldc) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < M && j < N) { float accumulator = 0; for (int k = 0; k < K; ++k) { accumulator += A[i + k * lda] * B[k + j * ldb]; } C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc]; } } /// Reference GEMM computation. cudaError_t ReferenceGemm( int M, int N, int K, float alpha, float const *A, int lda, float const *B, int ldb, float beta, float *C, int ldc) { dim3 block(16, 16); dim3 grid( (M + block.x - 1) / block.x, (N + block.y - 1) / block.y ); ReferenceGemm_kernel<<< grid, block >>>(M, N, K, alpha, A, lda, B, ldb, beta, C, ldc); return cudaGetLastError(); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Allocate several matrices in GPU device memory and call a single-precision /// CUTLASS GEMM kernel. cudaError_t TestCutlassGemm(int M, int N, int K, float alpha, float beta) { cudaError_t result; // // Define several matrices to be used as operands to GEMM kernels. // // Compute leading dimensions for each matrix. int lda = M; int ldb = K; int ldc = M; // Compute size in bytes of the C matrix. size_t sizeof_C = sizeof(float) * ldc * N; // Define pointers to matrices in GPU device memory. float *A; float *B; float *C_cutlass; float *C_reference; // // Allocate matrices in GPU device memory with arbitrary seeds. 
// result = AllocateMatrix(&A, M, K, 0); if (result != cudaSuccess) { return result; } result = AllocateMatrix(&B, K, N, 17); if (result != cudaSuccess) { cudaFree(A); return result; } result = AllocateMatrix(&C_cutlass, M, N, 101); if (result != cudaSuccess) { cudaFree(A); cudaFree(B); return result; } result = AllocateMatrix(&C_reference, M, N, 101); if (result != cudaSuccess) { cudaFree(A); cudaFree(B); cudaFree(C_cutlass); return result; } result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice); if (result != cudaSuccess) { std::cerr << "Failed to copy C_cutlass matrix to C_reference: " << cudaGetErrorString(result) << std::endl; cudaFree(C_reference); cudaFree(C_cutlass); cudaFree(B); cudaFree(A); return result; } // // Launch CUTLASS GEMM. // result = CutlassSgemmNN(M, N, K, alpha, A, lda, B, ldb, beta, C_cutlass, ldc); if (result != cudaSuccess) { std::cerr << "CUTLASS GEMM kernel failed: " << cudaGetErrorString(result) << std::endl; cudaFree(C_reference); cudaFree(C_cutlass); cudaFree(B); cudaFree(A); return result; } // // Verify. // // Launch reference GEMM result = ReferenceGemm(M, N, K, alpha, A, lda, B, ldb, beta, C_reference, ldc); if (result != cudaSuccess) { std::cerr << "Reference GEMM kernel failed: " << cudaGetErrorString(result) << std::endl; cudaFree(C_reference); cudaFree(C_cutlass); cudaFree(B); cudaFree(A); return result; } // Copy to host and verify equivalence. std::vector<float> host_cutlass(ldc * N, 0); std::vector<float> host_reference(ldc * N, 0); result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost); if (result != cudaSuccess) { std::cerr << "Failed to copy CUTLASS GEMM results: " << cudaGetErrorString(result) << std::endl; cudaFree(C_reference); cudaFree(C_cutlass); cudaFree(B); cudaFree(A); return result; } result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost); if (result != cudaSuccess) { std::cerr << "Failed to copy Reference GEMM results: " << cudaGetErrorString(result) << std::endl; cudaFree(C_reference); cudaFree(C_cutlass); cudaFree(B); cudaFree(A); return result; } // // Free device memory allocations. // cudaFree(C_reference); cudaFree(C_cutlass); cudaFree(B); cudaFree(A); // // Test for bit equivalence of results. // if (host_cutlass != host_reference) { std::cerr << "CUTLASS results incorrect." << std::endl; return cudaErrorUnknown; } return cudaSuccess; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to basic_gemm example. // // usage: // // 00_basic_gemm <M> <N> <K> <alpha> <beta> // int main(int argc, const char *arg[]) { // // Parse the command line to obtain GEMM dimensions and scalar values. // // GEMM problem dimensions. int problem[3] = { 128, 128, 128 }; for (int i = 1; i < argc && i < 4; ++i) { std::stringstream ss(arg[i]); ss >> problem[i - 1]; } // Scalars used for linear scaling the result of the matrix product. float scalars[2] = { 1, 0 }; for (int i = 4; i < argc && i < 6; ++i) { std::stringstream ss(arg[i]); ss >> scalars[i - 4]; } // // Run the CUTLASS GEMM test. // cudaError_t result = TestCutlassGemm( problem[0], // GEMM M dimension problem[1], // GEMM N dimension problem[2], // GEMM K dimension scalars[0], // alpha scalars[1] // beta ); if (result == cudaSuccess) { std::cout << "Passed." << std::endl; } // Exit. return result == cudaSuccess ? 0 : -1; } ///////////////////////////////////////////////////////////////////////////////////////////////////
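//
// Editor's note: the helper below is an illustrative sketch and is not part of the original
// example. It shows how host code that already owns device allocations could call
// CutlassSgemmNN() directly. The pointer names d_A, d_B and d_C are hypothetical, and the leading
// dimensions follow the column-major convention used throughout this file (lda = M, ldb = K,
// ldc = M).
//
cudaError_t RunCutlassSgemmOnDeviceBuffers(
  float const *d_A,
  float const *d_B,
  float *d_C,
  int M,
  int N,
  int K,
  float alpha = 1.0f,
  float beta = 0.0f) {

  // Leading dimensions for column-major matrices.
  int lda = M;
  int ldb = K;
  int ldc = M;

  // Reuse the CUTLASS GEMM wrapper defined earlier in this file.
  return CutlassSgemmNN(M, N, K, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc);
}

///////////////////////////////////////////////////////////////////////////////////////////////////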
cutlass/examples/00_basic_gemm/basic_gemm.cu/0
{ "file_path": "cutlass/examples/00_basic_gemm/basic_gemm.cu", "repo_id": "cutlass", "token_count": 5053 }
2
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

/**
This example shows how to use the split-k version of matrix multiplication using functions and
data structures provided by CUTLASS, which we run on an NVIDIA Volta GPU.

What is split-k?
Consider a problem size of M = 128, N = 128, K = 4096. In this case, if the threadblock tile size
(a tile can be viewed as a 2d matrix) is 128x128x4096, then we launch only a single threadblock,
taking up a single SM of the 84 SMs present on V100. Hence, the efficiency of computation is
really low.

This is where split-k comes in: it partitions the K-dimension of the matrix multiplication and
distributes the work across multiple SMs, achieving better efficiency than a single SM. In the
above example, we can partition the K-dimension with a split-k factor of 16, i.e., the threadblock
tile size becomes 128x128x256 and the work is launched on 16 SMs. Once each threadblock computes
its partial inner product (1/16th of the output), the partial results are accumulated into a
single output matrix.

Writing a single high-performance matrix multiplication kernel is hard but doable, whereas writing
high-performance kernels at scale that work for multiple problem sizes with good abstractions is
really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple
sections of a GEMM kernel. When used properly, the kernels can easily hit peak performance of the
GPU.

CUTLASS divides a kernel into hierarchical, composable sections. This means that at the thread,
warp, and threadblock level, each computes on its own tile size, with higher-level tile sizes
being composed from lower-level ones.
Multiple thread-tiles (the tile size each thread computes) can be used to form warp-tiles (the
tile size each warp computes), and multiple warp tiles can be used to compute a threadblock-tile
(the tile size computed by a threadblock).

In this example, we split variable initialization into two parts:
1. Setting up data properties: describes how matrices are laid out in memory and how the kernel
   can view them (logical-to-physical mapping).
2. Setting up computation properties: describes how the above matrices will be used to compute the
   output of the matrix multiplication.

First, we set up the data types of matrices A, B, C and D along with alpha and beta, as the
equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and
leave the rest of the computation to the end of the kernel, as alpha * X + beta * C is a simple
element-wise operation on X (A * B) and C. We call this the epilogue of the kernel. Hence, we set
the data types for alpha and beta equal to ElementComputeEpilogue = float. As we want to use MMA
instructions on Volta, and they support only half-precision floating point (fp16 or half), we use
cutlass::half_t as the data type for elements of the input matrices A and B. Volta also supports
accumulation of partial dot products in fp32, which can store a wider range of numbers, so we use
it as the data type of the output matrix elements and of the accumulation. We convey this to the
CUTLASS kernel by initializing the template variables ElementAccumulator (float),
ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB (cutlass::half_t),
and ElementOutput (float). Communicating just the data types is not enough. As the data is laid
out linearly in memory, we also have to convey the layout of the matrices. We do that by
initializing the template variable LayoutInputA to column-major, LayoutInputB to row-major, and
LayoutOutput to row-major. Next, we set up the rules to compute alpha * X + beta * C, which is
called the epilogue of the kernel. We initialize the template variable EpilogueOp, which takes the
data type of the output ElementOutput (float), the number of elements per vectorized memory access
(128 / 32 = 4 for the float output used here), the data type of the accumulator (float), and the
data type of the linear combination computation (alpha * X + beta * C).

Now that we have set up the properties of the data, we have to set up the properties of the
computation.

Second, we create template variables for the tile sizes computed by a threadblock, a warp, and an
MMA op: 128x128x32, 64x64x32, and 8x8x4 (MxNxK) respectively. When these are passed to instantiate
the CUTLASS GEMM kernel, it internally deduces the number of threads needed per threadblock, the
amount of shared memory, how to store data in a bank-conflict-free manner, and a ton of other
variables required to compose, initialize and launch a high-performance GEMM kernel. This is the
beauty of CUTLASS: it relieves the developer from understanding and coding complicated hardware
optimizations which can easily go wrong.

A few more template variables are initialized, such as which threadblock tile of the output matrix
is computed by which threadblock launched on an SM, and the CUDA SM architecture of the GPU you
want to run on. These are all put together to create a template variable which describes the
CUTLASS GEMM kernel, using the cutlass::gemm::device::GemmSplitKParallel template.

The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel, and
run it. We use CUTLASS utilities to initialize, fill, and compare matrices, as they are simple and
don't get in the way of learning CUTLASS.
Once all the matrices are initialized and filled with data, we create an arguments tuple to launch
the CUTLASS kernel, which takes the problem size (M = 5120, N = 4096 and K = 4096), the matrices,
alpha, beta, and, importantly, the split k-dimension factor. Along with that, we query CUTLASS
whether any scratch-space memory is required by the kernel we instantiated. If so, we allocate it
and pass it along with the other arguments used to initialize the CUTLASS kernel; then the kernel
is launched.

In this example, we later launch a reference GEMM kernel (from CUTLASS utilities) to check that
the output of the CUTLASS kernel is the same as that of the reference GEMM kernel.
*/

#include <iostream>

#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_splitk_parallel.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"

// The code section below describes the datatypes for the input and output matrices and the
// computation between elements of the input matrices.
using ElementAccumulator = float;                   // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator;  // <- data type of epilogue operations
using ElementInputA = cutlass::half_t;              // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t;              // <- data type of elements in input matrix B
using ElementOutput = float;                        // <- data type of elements in output matrix D

// The code section below describes the matrix layouts of the input and output matrices:
// column-major for matrix A, row-major for matrix B and row-major for matrix C.
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::RowMajor;

// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;

// This code section describes the CUDA SM architecture number
using SmArch = cutlass::arch::Sm70;

// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
    cutlass::gemm::GemmShape<128, 128, 32>;  // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes the tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>;  // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of the MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>;  // <- MMA Op tile M = 8, N = 8, K = 4

// This code section describes the epilogue of the kernel (the alpha * X + beta * C computation)
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
    ElementOutput,                                     // <- data type of output matrix
    128 / cutlass::sizeof_bits<ElementOutput>::value,  // <- This is the number of elements per
                                                       // vectorized memory access. For the float
                                                       // output used here, that is 128 / 32 = 4
                                                       // elements.
This becomes // the vector width of math instructions in // epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Put all the created template variables to create GemmSplitKParallel template variable using Gemm = cutlass::gemm::device::GemmSplitKParallel<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp>; int run() { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major != 7) { std::cerr << "Volta Tensor Ops must be run on a machine with compute capability of 70, 72, or 75." << std::endl; // Return 0 so tests pass if run on unsupported architectures or CUDA Toolkits. return 0; } // // Define problem size // const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 16 partitions int split_k_slices = 16; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { // // Volta Tensor Core operations exposed with mma.sync are first available in CUDA 10.1. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. // if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl; // Returning zero, so this test passes when built with older CUDA Toolkits. Its action are no-op. return 0; } else { return run(); } }
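/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Editor's note: the helper below is an illustrative sketch and is not part of the original
// example. It shows the minimal sequence needed to re-run the same split-k GEMM with a different
// number of k-slices: rebuild the arguments, re-query the workspace size (it grows with the
// split-k factor, since each slice writes a partial result), and re-initialize the operator.
// The tensor references and scalars are assumed to be the same ones constructed in run() above.
//
cutlass::Status rerun_with_split_k(
    Gemm &gemm_op,
    cutlass::gemm::GemmCoord problem_size,
    cutlass::TensorRef<ElementInputA, LayoutInputA> ref_a,
    cutlass::TensorRef<ElementInputB, LayoutInputB> ref_b,
    cutlass::TensorRef<ElementOutput, LayoutOutput> ref_c,
    cutlass::TensorRef<ElementOutput, LayoutOutput> ref_d,
    ElementComputeEpilogue alpha,
    ElementComputeEpilogue beta,
    int split_k_slices) {

  // Same argument structure as in run(), only the split-k factor differs.
  typename Gemm::Arguments arguments{problem_size,
                                     ref_a,
                                     ref_b,
                                     ref_c,
                                     ref_d,
                                     {alpha, beta},
                                     split_k_slices};

  // The parallel split-k kernel needs scratch space for the partial results of each slice.
  size_t workspace_size = Gemm::get_workspace_size(arguments);
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

  cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  return gemm_op();
}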
cutlass/examples/06_splitK_gemm/splitk_gemm.cu/0
{ "file_path": "cutlass/examples/06_splitK_gemm/splitk_gemm.cu", "repo_id": "cutlass", "token_count": 6186 }
3
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/device/tensor_relu.h" #include "reference/device/tensor_scale_bias.h" #include "helper.h" #define CHECK_GT(val1, val2) \ if((val1) <= (val2)) \ std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_GT failed\n"; #define CHECK_TRUE(val) \ if(!(val)) \ std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_TRUE failed\n"; //////////////////////////////////////////////////////////////////////////////// template <typename Gemm0_, typename Gemm1_> struct B2bNonFusedGemmRun { using Gemm0 = Gemm0_; using Gemm1 = Gemm1_; using ElementAccumulator = typename Gemm0::ElementAccumulator; using ElementCompute = typename Gemm0::GemmKernel::Epilogue::OutputOp::ElementCompute; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; cutlass::Distribution::Kind init_Bias; uint64_t seed; // // Methods // B2bNonFusedGemmRun( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), 
init_B(init_B_), init_C(init_C_), init_Bias(init_Bias_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { cutlass::reference::host::TensorFillRandomUniform( view, seed, 2, -2, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else if (dist_kind == cutlass::Distribution::AllZeros) { cutlass::reference::host::TensorFill(view, Element(0)); } else if (dist_kind == cutlass::Distribution::AllOnes) { cutlass::reference::host::TensorFill(view, Element(1)); } else { std::cerr << "Not implemented\n"; return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmCoord problem_size_0, cutlass::gemm::GemmCoord problem_size_1, ElementCompute alpha0 = ElementCompute(1), ElementCompute beta0 = ElementCompute(0), ElementCompute alpha1 = ElementCompute(1), ElementCompute beta1 = ElementCompute(0), bool relu = true, int warm_ups = 1, int runs = 100) { // // Allocate the GEMM workspace // cutlass::HostTensor< typename Gemm0::ElementA, typename Gemm0::LayoutA> tensor_A0(problem_size_0.mk()); cutlass::HostTensor< typename Gemm0::ElementB, typename Gemm0::LayoutB> tensor_B0(problem_size_0.kn()); cutlass::HostTensor< typename Gemm0::ElementC, typename Gemm0::LayoutC> tensor_C0(problem_size_0.mn()); cutlass::HostTensor< ElementCompute, typename Gemm0::LayoutC> tensor_Bias0({1, problem_size_0.n()}); cutlass::HostTensor< typename Gemm0::ElementC, typename Gemm0::LayoutC> tensor_D0(problem_size_0.mn()); cutlass::HostTensor< typename Gemm0::ElementC, typename Gemm0::LayoutC> reference_D0(problem_size_0.mn()); cutlass::HostTensor< typename Gemm1::ElementB, typename Gemm1::LayoutB> tensor_B1(problem_size_1.kn()); cutlass::HostTensor< typename Gemm1::ElementC, typename Gemm1::LayoutC> tensor_C1(problem_size_1.mn()); cutlass::HostTensor< ElementCompute, typename Gemm1::LayoutC> tensor_Bias1({1, problem_size_1.n()}); cutlass::HostTensor< typename Gemm1::ElementC, typename Gemm1::LayoutC> tensor_D1(problem_size_1.mn()); cutlass::HostTensor< typename Gemm1::ElementC, typename Gemm1::LayoutC> reference_D1(problem_size_1.mn()); CHECK_TRUE(initialize_tensor(tensor_A0.host_view(), init_A, seed + 2019)); CHECK_TRUE(initialize_tensor(tensor_B0.host_view(), init_B, seed + 2018)); CHECK_TRUE(initialize_tensor(tensor_C0.host_view(), init_C, seed + 2017)); CHECK_TRUE(initialize_tensor(tensor_Bias0.host_view(), init_Bias, seed + 2014)); CHECK_TRUE(initialize_tensor(tensor_B1.host_view(), init_B, seed + 2016)); CHECK_TRUE(initialize_tensor(tensor_C1.host_view(), init_C, seed + 2015)); CHECK_TRUE(initialize_tensor(tensor_Bias1.host_view(), init_Bias, seed + 2013)); cutlass::reference::host::TensorFill( tensor_D0.host_view()); cutlass::reference::host::TensorFill( tensor_D1.host_view()); cutlass::reference::host::TensorFill( reference_D0.host_view()); cutlass::reference::host::TensorFill( reference_D1.host_view()); tensor_A0.sync_device(); tensor_B0.sync_device(); tensor_C0.sync_device(); tensor_Bias0.sync_device(); tensor_D0.sync_device(); tensor_B1.sync_device(); 
tensor_C1.sync_device(); tensor_Bias1.sync_device(); tensor_D1.sync_device(); reference_D0.sync_device(); reference_D1.sync_device(); // // Initialize the GEMM operator // typename Gemm0::Arguments arguments_0{ problem_size_0, tensor_A0.device_ref(), tensor_B0.device_ref(), {tensor_Bias0.device_data(), typename Gemm0::LayoutC::Stride(0)}, tensor_D0.device_ref(), {alpha0, beta0} }; typename Gemm1::Arguments arguments_1{ problem_size_1, tensor_D0.device_ref(), tensor_B1.device_ref(), {tensor_Bias1.device_data(), typename Gemm1::LayoutC::Stride(0)}, tensor_D1.device_ref(), {alpha1, beta1} }; Gemm0 gemm_op_0; Gemm1 gemm_op_1; cutlass::Status status = gemm_op_0.initialize(arguments_0); CUTLASS_CHECK(status); status = gemm_op_1.initialize(arguments_1); CUTLASS_CHECK(status); for(int i = 0; i < warm_ups; i++) { status = gemm_op_0(); CUTLASS_CHECK(status); status = gemm_op_1(); CUTLASS_CHECK(status); } // // Run the GEMM // cudaEvent_t start, stop1, stop2; cudaEventCreate(&start); cudaEventCreate(&stop1); cudaEventCreate(&stop2); cudaEventRecord(start); for(int i = 0; i < runs; i++) { status = gemm_op_0(); CUTLASS_CHECK(status); } cudaEventRecord(stop1); for(int i = 0; i < runs; i++) { status = gemm_op_1(); CUTLASS_CHECK(status); } cudaEventRecord(stop2); cudaDeviceSynchronize(); float gemm0Time, gemm1Time, totalTime; cudaEventElapsedTime(&gemm0Time, start, stop1); cudaEventElapsedTime(&gemm1Time, stop1, stop2); cudaEventElapsedTime(&totalTime, start, stop2); std::cout << "gemm 0 time " << gemm0Time / (float)runs << " ms\n"; std::cout << "gemm 1 time " << gemm1Time / (float)runs << " ms\n"; std::cout << "Non-fusion time " << totalTime / (float)runs << " ms\n"; tensor_D0.sync_host(); tensor_D1.sync_host(); // // Verify // cutlass::reference::device::Gemm< typename Gemm0::ElementA, typename Gemm0::LayoutA, typename Gemm0::ElementB, typename Gemm0::LayoutB, typename Gemm0::ElementC, typename Gemm0::LayoutC, ElementCompute, ElementAccumulator, typename Gemm0::Operator> reference_gemm_0; cutlass::reference::device::Gemm< typename Gemm1::ElementA, typename Gemm1::LayoutA, typename Gemm1::ElementB, typename Gemm1::LayoutB, typename Gemm1::ElementC, typename Gemm1::LayoutC, ElementCompute, ElementAccumulator, typename Gemm1::Operator> reference_gemm_1; reference_gemm_0( problem_size_0, alpha0, tensor_A0.device_ref(), tensor_B0.device_ref(), beta0, {tensor_Bias0.device_data(), typename Gemm0::LayoutC::Stride(0)}, reference_D0.device_ref() ); if(relu) { cutlass::reference::device::TensorReLu(reference_D0.device_view()); } reference_gemm_1( problem_size_1, alpha1, reference_D0.device_ref(), tensor_B1.device_ref(), beta1, {tensor_Bias1.device_data(), typename Gemm1::LayoutC::Stride(0)}, reference_D1.device_ref() ); if(relu) { cutlass::reference::device::TensorReLu(reference_D1.device_view()); } // Wait for kernels to finish cudaDeviceSynchronize(); reference_D0.sync_host(); reference_D1.sync_host(); CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D0.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D0.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D1.host_view()), 0); bool passed = cutlass::reference::host::TensorEquals( reference_D1.host_view(), tensor_D1.host_view()); CHECK_TRUE(passed); if (!passed) { std::stringstream fname; fname << "error_B2bGemm_device_nonfused.txt"; std::cerr << "Dumping results in " << fname.str() << "\n"; std::ofstream file(fname.str()); file << "A0 =\n" 
<< tensor_A0.host_view() << "\nB0 =\n" << tensor_B0.host_view() << "\nC0 =\n" << tensor_C0.host_view() << "\nBias0:\n" << tensor_Bias0.host_view() << "\n" << "\nD0 =\n" << tensor_D0.host_view() << "\nB1 =\n" << tensor_B1.host_view() << "\nC1 =\n" << tensor_C1.host_view() << "\nBias1:\n" << tensor_Bias1.host_view() << "\n" << "\n\nReference =\n" << reference_D1.host_view() << "\nComputed =\n" << tensor_D1.host_view(); } return passed; } }; template <typename B2bGemm_> struct B2bFusedGemmRun { using B2bGemm = B2bGemm_; using ElementAccumulator = typename B2bGemm::ElementAccumulator; using ElementCompute = typename B2bGemm::B2bGemmKernel::Epilogue::OutputOp::ElementCompute; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; cutlass::Distribution::Kind init_Scale; cutlass::Distribution::Kind init_Bias; uint64_t seed; // // Methods // B2bFusedGemmRun( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_Scale_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), init_Scale(init_Scale_), init_Bias(init_Bias_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { cutlass::reference::host::TensorFillRandomUniform( view, seed, 2, -2, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else if (dist_kind == cutlass::Distribution::AllZeros) { cutlass::reference::host::TensorFill(view, Element(0)); } else if (dist_kind == cutlass::Distribution::AllOnes) { cutlass::reference::host::TensorFill(view, Element(1)); } else { std::cerr << "Not implemented\n"; return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmCoord problem_size_0, cutlass::gemm::GemmCoord problem_size_1, ElementCompute alpha0 = ElementCompute(1), ElementCompute beta0 = ElementCompute(0), ElementCompute alpha1 = ElementCompute(1), ElementCompute beta1 = ElementCompute(0), cutlass::gemm::GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm, // batch_count is used as split-k when mode is kGemm according // to the GemmUniversal interface int batch_count = 1, int64_t batch_stride_A0 = 0, int64_t batch_stride_B0 = 0, int64_t batch_stride_C0 = 0, int64_t batch_stride_B1 = 0, int64_t batch_stride_C1 = 0, int64_t batch_stride_D1 = 0, int64_t batch_stride_Bias0 = 0, int64_t batch_stride_Scale0 = 0, bool relu = true, int warm_ups = 1, int runs = 100) { // // Allocate the GEMM workspace // cutlass::gemm::GemmCoord CoordA0(problem_size_0.m(), problem_size_0.n(), batch_count * problem_size_0.k()); cutlass::gemm::GemmCoord CoordB0(problem_size_0.m(), problem_size_0.n(), batch_count * problem_size_0.k()); cutlass::gemm::GemmCoord CoordC0(problem_size_0.m(), batch_count * 
problem_size_0.n(), problem_size_0.k()); cutlass::gemm::GemmCoord CoordB1(problem_size_1.m(), problem_size_1.n(), batch_count * problem_size_1.k()); cutlass::gemm::GemmCoord CoordC1(problem_size_1.m(), batch_count * problem_size_1.n(), problem_size_1.k()); cutlass::HostTensor< typename B2bGemm::ElementA, typename B2bGemm::LayoutA> tensor_A0(CoordA0.mk()); cutlass::HostTensor< typename B2bGemm::ElementB, typename B2bGemm::LayoutB> tensor_B0(CoordB0.kn()); cutlass::HostTensor< typename B2bGemm::ElementC, typename B2bGemm::LayoutC> tensor_C0(CoordC0.mn()); cutlass::HostTensor< typename B2bGemm::ElementScaleBias, typename B2bGemm::LayoutScaleBias> tensor_Scale0; if(alpha0 == ElementCompute(0)) //per-channel scale tensor_Scale0.resize({1, batch_count * problem_size_0.n()}); cutlass::HostTensor< typename B2bGemm::ElementScaleBias, typename B2bGemm::LayoutScaleBias> tensor_Bias0({1, batch_count * problem_size_0.n()}); cutlass::HostTensor< ElementAccumulator, typename B2bGemm::LayoutC> reference_Z0(CoordC0.mn()); cutlass::HostTensor< typename B2bGemm::ElementC, typename B2bGemm::LayoutC> reference_D0(CoordC0.mn()); cutlass::HostTensor< typename B2bGemm::ElementB, typename B2bGemm::LayoutB> tensor_B1(CoordB1.kn()); cutlass::HostTensor< typename B2bGemm::ElementC, typename B2bGemm::LayoutC> tensor_C1(CoordC1.mn()); cutlass::HostTensor< typename B2bGemm::ElementC, typename B2bGemm::LayoutScaleBias> tensor_Bias1({1, batch_count * problem_size_1.n()}); cutlass::HostTensor< typename B2bGemm::ElementC, typename B2bGemm::LayoutC> tensor_D1(CoordC1.mn()); cutlass::HostTensor< typename B2bGemm::ElementC, typename B2bGemm::LayoutC> reference_D1(CoordC1.mn()); CHECK_TRUE(initialize_tensor(tensor_A0.host_view(), init_A, seed + 2019)); CHECK_TRUE(initialize_tensor(tensor_B0.host_view(), init_B, seed + 2018)); CHECK_TRUE(initialize_tensor(tensor_C0.host_view(), init_C, seed + 2017)); if(alpha0 == ElementCompute(0)) //per-channel scale CHECK_TRUE(initialize_tensor(tensor_Scale0.host_view(), init_Scale, seed + 2014)); CHECK_TRUE(initialize_tensor(tensor_Bias0.host_view(), init_Bias, seed + 2013)); CHECK_TRUE(initialize_tensor(tensor_B1.host_view(), init_B, seed + 2016)); CHECK_TRUE(initialize_tensor(tensor_C1.host_view(), init_C, seed + 2015)); CHECK_TRUE(initialize_tensor(tensor_Bias1.host_view(), init_Bias, seed + 2012)); cutlass::reference::host::TensorFill( tensor_D1.host_view()); cutlass::reference::host::TensorFill( reference_D0.host_view()); cutlass::reference::host::TensorFill( reference_D1.host_view()); tensor_A0.sync_device(); tensor_B0.sync_device(); tensor_C0.sync_device(); if(alpha0 == ElementCompute(0)) //per-channel scale tensor_Scale0.sync_device(); tensor_Bias0.sync_device(); tensor_B1.sync_device(); tensor_C1.sync_device(); tensor_Bias1.sync_device(); tensor_D1.sync_device(); reference_D0.sync_device(); reference_D1.sync_device(); // // Initialize the GEMM operator // typename B2bGemm::Arguments arguments{ mode, problem_size_0, problem_size_1, tensor_A0.device_ref(), tensor_B0.device_ref(), tensor_C0.device_ref(), tensor_Scale0.device_ref(), tensor_Bias0.device_ref(), tensor_B1.device_ref(), {tensor_Bias1.device_data(), typename B2bGemm::LayoutC::Stride(0)}, tensor_D1.device_ref(), batch_stride_A0, batch_stride_B0, batch_stride_B1, batch_stride_C1, batch_stride_D1, batch_stride_Bias0, batch_stride_Scale0, {alpha0, beta0}, {alpha1, beta1}, batch_count, }; B2bGemm b2b_gemm_op; cutlass::Status status = b2b_gemm_op.can_implement(arguments); if(status != cutlass::Status::kSuccess) { std::cout << 
"Problem sizes not supported.\n" << "Requirments:\n" << " problem_size_0.M = problem_size_1.M\n" << " problem_size_0.N = problem_size_1.K\n" << " ThreadblockShape0::kN = problem_size_0.N\n" << " ThreadblockShape1::kN = problem_size_1.N" << std::endl; } status = b2b_gemm_op.initialize(arguments); CUTLASS_CHECK(status); for(int i = 0; i < warm_ups; i++) { status = b2b_gemm_op(); CUTLASS_CHECK(status); } // // Run the GEMM // cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); for(int i = 0; i < runs; i++) { status = b2b_gemm_op(); CUTLASS_CHECK(status); } cudaEventRecord(stop); cudaDeviceSynchronize(); float gemmTime; cudaEventElapsedTime(&gemmTime, start, stop); std::cout << "Fusion time " << gemmTime / (float)runs << " ms\n"; tensor_D1.sync_host(); // // Verify // cutlass::reference::device::GemmComplex< typename B2bGemm::ElementA, typename B2bGemm::LayoutA, typename B2bGemm::ElementB, typename B2bGemm::LayoutB, ElementAccumulator, typename B2bGemm::LayoutC, ElementAccumulator, ElementAccumulator >( problem_size_0, ElementAccumulator(1), //intermediate alpha=1 tensor_A0.device_ref(), cutlass::ComplexTransform::kNone, tensor_B0.device_ref(), cutlass::ComplexTransform::kNone, ElementAccumulator(0), //beta = 0 reference_Z0.device_ref(), reference_Z0.device_ref(), ElementAccumulator(0), int(batch_count), batch_stride_A0, batch_stride_B0, batch_stride_C0, batch_stride_C0 ); cutlass::reference::device::TensorScaleBiasGemmBatched< ElementAccumulator, typename B2bGemm::ElementC, typename B2bGemm::LayoutC, ElementCompute, typename B2bGemm::LayoutScaleBias > ( problem_size_0, reference_Z0.device_ref(), reference_D0.device_ref(), alpha0, tensor_Scale0.device_ref(), tensor_Bias0.device_ref(), int(batch_count), batch_stride_C0, batch_stride_C0, batch_stride_Scale0, batch_stride_Bias0 ); if(relu) { cutlass::reference::device::TensorReLu(reference_D0.device_view()); } cutlass::reference::device::GemmComplex< typename B2bGemm::ElementA, typename B2bGemm::LayoutA, typename B2bGemm::ElementB, typename B2bGemm::LayoutB, typename B2bGemm::ElementC, typename B2bGemm::LayoutC, ElementCompute, ElementAccumulator >( problem_size_1, alpha1, //intermediate alpha=1 reference_D0.device_ref(), cutlass::ComplexTransform::kNone, tensor_B1.device_ref(), cutlass::ComplexTransform::kNone, beta1, //beta = 0 {tensor_Bias1.device_data(), typename B2bGemm::LayoutC::Stride(0)}, reference_D1.device_ref(), ElementAccumulator(0), int(batch_count), batch_stride_C0, batch_stride_B1, batch_stride_C1, batch_stride_D1 ); if(relu) { cutlass::reference::device::TensorReLu(reference_D1.device_view()); } cudaDeviceSynchronize(); reference_D0.sync_host(); reference_D1.sync_host(); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D0.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D1.host_view()), 0); bool passed = cutlass::reference::host::TensorEquals( reference_D1.host_view(), tensor_D1.host_view()); CHECK_TRUE(passed); if (!passed) { std::stringstream fname; fname << "error_B2bGemm_device_fused.txt"; std::cerr << "Dumping results in " << fname.str() << "\n"; std::ofstream file(fname.str()); file << "A0 =\n" << tensor_A0.host_view() << "\nB0 =\n" << tensor_B0.host_view() << "\nC0 =\n" << tensor_C0.host_view() << "\nScale0:\n" << tensor_Scale0.host_view() << "\n" << "\nBias0:\n" << tensor_Bias0.host_view() << "\n" << "\nB1 =\n" << tensor_B1.host_view() << "\nC1 =\n" << tensor_C1.host_view() 
<< "\nBias1:\n" << tensor_Bias1.host_view() << "\n" << "\n\nReference =\n" << reference_D1.host_view() << "\nComputed =\n" << tensor_D1.host_view(); } return passed; } }; ////////////////////////////////////////////////////////////////////////////////
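////////////////////////////////////////////////////////////////////////////////
//
// Editor's note: the snippet below is an illustrative usage sketch and is not part of the original
// header. `Gemm0`, `Gemm1` and `B2bGemm` stand for concrete device-level GEMM instantiations that
// the example translation units including this header define; the problem sizes are placeholders
// chosen so that they satisfy M0 == M1 and N0 == K1.
//
//   B2bNonFusedGemmRun<Gemm0, Gemm1> non_fused;
//   bool non_fused_passed = non_fused.run(
//       {128 * 640, 64, 576},   // problem_size_0 = (M, N, K) of the first GEMM
//       {128 * 640, 128, 64},   // problem_size_1 = (M, N, K) of the second GEMM
//       1.0f, 0.0f,             // alpha0, beta0
//       1.0f, 0.0f,             // alpha1, beta1
//       true);                  // apply ReLU after each GEMM
//
//   B2bFusedGemmRun<B2bGemm> fused;
//   bool fused_passed = fused.run(
//       {128 * 640, 64, 576}, {128 * 640, 128, 64},
//       1.0f, 0.0f, 1.0f, 0.0f);  // remaining arguments keep their defaults
//                                 // (kGemm mode, batch_count = 1, ReLU enabled)
//
////////////////////////////////////////////////////////////////////////////////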
cutlass/examples/13_two_tensor_op_fusion/b2b_gemm_run.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/b2b_gemm_run.h", "repo_id": "cutlass", "token_count": 10562 }
4
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are accommodated by exchanging A and B operands and assuming transposed layouts. Partial specializations here choose 'device::GemmTransposed' to implement this functionality. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/gemm_pipelined.h" #include "cutlass/gemm/threadblock/default_mma_core_sm75.h" #include "cutlass/gemm/threadblock/default_mma_core_sm70.h" #include "cutlass/gemm/threadblock/default_mma_core_sm80.h" #include "cutlass/gemm/threadblock/default_mma_core_simt.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/vector_iterator.h" #include "cutlass/transform/threadblock/predicated_vector_access_iterator.h" #include "kernel/b2b_gemm.h" #include "threadblock/default_b2b_mma.h" #include "threadblock/default_b2b_mma_smem_accumulator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Ampere Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape0, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape1, /// Warp-level tile size (concept: GemmShape) typename WarpShape0, /// Warp-level tile size (concept: GemmShape) typename WarpShape1, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp0, /// Epilogue output operator typename EpilogueOutputOp1, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// Operation performed by GEMM typename Operator> struct DefaultB2bGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages, Operator, true> { /// Define the threadblock-scoped matrix multiply-accumulate using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, Stages, Operator, EpilogueOutputOp0, false, true>::ThreadblockB2bMma; static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape1, typename 
B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1, EpilogueOutputOp1::kCount>::Epilogue; /// Define the kernel-level GEMM operator. using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Turing Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape0, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape1, /// Warp-level tile size (concept: GemmShape) typename WarpShape0, /// Warp-level tile size (concept: GemmShape) typename WarpShape1, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp0, /// Epilogue output operator typename EpilogueOutputOp1, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Operation performed by GEMM typename Operator > struct DefaultB2bGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, 2, Operator, true > { /// Define the threadblock-scoped matrix multiply-accumulate using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, 2, Operator, EpilogueOutputOp0, false, true >::ThreadblockB2bMma; static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1, EpilogueOutputOp1::kCount >::Epilogue; /// Define the kernel-level GEMM operator. 
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>; }; /// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout template < /// Element type for A matrix operand typename ElementA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape0, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape1, /// Warp-level tile size (concept: GemmShape) typename WarpShape0, /// Warp-level tile size (concept: GemmShape) typename WarpShape1, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp0, /// Epilogue output operator typename EpilogueOutputOp1, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// Number of Interleaved k int InterleavedK, /// Operation performed by GEMM typename Operator> struct DefaultB2bGemm< ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA, ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC, layout::ColumnMajorInterleaved<InterleavedK>, int32_t, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages, Operator, true> { using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>; using LayoutB = layout::RowMajorInterleaved<InterleavedK>; using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>; using ElementAccumulator = int32_t; /// Define the threadblock-scoped matrix multiply-accumulate using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, Stages, Operator, EpilogueOutputOp0, true, true>::ThreadblockB2bMma; static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock:: DefaultInterleavedEpilogueTensorOp< ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1, 64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue; /// Define the kernel-level GEMM operator. 
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Turing Integer Tensor Core Interleaved layout template < /// Element type for A matrix operand typename ElementA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape0, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape1, /// Warp-level tile size (concept: GemmShape) typename WarpShape0, /// Warp-level tile size (concept: GemmShape) typename WarpShape1, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp0, /// Epilogue output operator typename EpilogueOutputOp1, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of Interleaved k int InterleavedK, /// Operation performed by GEMM typename Operator> struct DefaultB2bGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA, ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC, layout::ColumnMajorInterleaved<InterleavedK>, int32_t, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, 2, Operator, true> { using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>; using LayoutB = layout::RowMajorInterleaved<InterleavedK>; using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>; using ElementAccumulator = int32_t; /// Define the threadblock-scoped matrix multiply-accumulate using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, 2, Operator, EpilogueOutputOp0, true, true>::ThreadblockB2bMma; static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK; /// Define the epilogue for the 2nd Gemm using Epilogue = typename cutlass::epilogue::threadblock:: DefaultInterleavedEpilogueTensorOp< ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1, 64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue; /// Define the kernel-level GEMM operator. using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>; }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
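////////////////////////////////////////////////////////////////////////////////
//
// Editor's note: the snippet below is an illustrative usage sketch and is not part of the original
// header. It shows how the SM80 partial specialization above is typically consumed to obtain a
// kernel type. The concrete element types, tile shapes, epilogue operators, swizzle and stage
// count are placeholders chosen for demonstration; a real example must pick values that are
// mutually consistent.
//
//   using DefaultOp = cutlass::gemm::kernel::DefaultB2bGemm<
//       cutlass::half_t, cutlass::layout::RowMajor, 8,     // A: element, layout, alignment
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,  // B: element, layout, alignment
//       cutlass::half_t, cutlass::layout::RowMajor,        // C/D: element, layout
//       float,                                             // accumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       ThreadblockShape0, ThreadblockShape1,              // threadblock tiles of the two GEMMs
//       WarpShape0, WarpShape1,                            // warp tiles of the two GEMMs
//       cutlass::gemm::GemmShape<16, 8, 16>,               // instruction shape
//       EpilogueOutputOp0, EpilogueOutputOp1,              // epilogues of the two GEMMs
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                 // mainloop stages
//       cutlass::arch::OpMultiplyAdd,
//       true>;                                             // accumulate the first GEMM in shared memory
//
//   using B2bGemmKernel = typename DefaultOp::B2bGemmKernel;
//
////////////////////////////////////////////////////////////////////////////////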
cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_gemm_smem_accumulator.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_gemm_smem_accumulator.h", "repo_id": "cutlass", "token_count": 5060 }
5
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Implements several threadblock-swizzling functions for grouped kernels */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/kernel/grouped_problem_visitor.h" #include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h" #include "kernel/b2b_gemm_grouped_problem_visitor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { struct GroupedThreadblockSwizzleBase {}; /// Helper for determining if a swizzling function is specialized for grouped operation template <typename ThreadblockSwizzle> struct IsGroupedSwizzle { static bool const value = cutlass::platform::is_base_of<GroupedThreadblockSwizzleBase, ThreadblockSwizzle>::value; }; } // namespace detail /// Swizzling function for grouped kernels template <typename ProblemVisitor_> struct GroupedThreadblockSwizzle : detail::GroupedThreadblockSwizzleBase { using ProblemVisitor = ProblemVisitor_; ProblemVisitor problem_visitor; CUTLASS_HOST_DEVICE GroupedThreadblockSwizzle(typename ProblemVisitor::Params& params, typename ProblemVisitor::SharedStorage& shared_storage, int block_idx) : problem_visitor(params, shared_storage, block_idx) {} /// Obtains the threadblock offset (in units of threadblock-scoped tiles) CUTLASS_DEVICE GemmCoord get_tile_offset(int /*log_tile*/) const { GemmCoord problem_size = problem_visitor.problem_size(); int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); return GemmCoord(int(threadblock_idx / grid_shape.n()), int(threadblock_idx % grid_shape.n()), 0); } /// Dummy method to satisfy API for threadblock swizzling functions CUTLASS_HOST_DEVICE static int get_log_tile(GemmCoord /*tiled_shape*/) { return 0; } }; template < typename ThreadblockShape, typename LayoutC, cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_ = cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly, int PrefetchTileCount = 128, int ThreadCount = PrefetchTileCount> struct B2bGemmGroupedThreadblockSwizzle : GroupedThreadblockSwizzle< cutlass::gemm::kernel::B2bGemmGroupedProblemVisitor< ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount, platform::is_same<LayoutC, cutlass::layout::ColumnMajor>::value > > { using Base = GroupedThreadblockSwizzle<cutlass::gemm::kernel::B2bGemmGroupedProblemVisitor< ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount, platform::is_same<LayoutC, cutlass::layout::ColumnMajor>::value>>; CUTLASS_HOST_DEVICE B2bGemmGroupedThreadblockSwizzle(typename Base::ProblemVisitor::Params& params, typename Base::ProblemVisitor::SharedStorage& shared_storage, int block_idx) : Base(params, shared_storage, block_idx) {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
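// For illustration: GroupedThreadblockSwizzle::get_tile_offset() above maps the problem
// visitor's threadblock index to a row-major (m, n) tile coordinate within that problem's grid.
// For example, with a per-problem tile grid of 2 x 3 (grid_shape.m() == 2, grid_shape.n() == 3),
// threadblock indices 0..5 map to tile offsets (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).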
cutlass/examples/13_two_tensor_op_fusion/threadblock/grouped_threadblock_swizzle.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/threadblock/grouped_threadblock_swizzle.h", "repo_id": "cutlass", "token_count": 2141 }
6
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example adopts example 16 to use 3xTF32 to bring FP32 accuracy with 2x performance compared with CUDA Cores. See example 27 for the trick of 3xTF32. 
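
    Roughly, the 3xTF32 trick (detailed in example 27) splits each fp32 operand into a "big"
    TF32 value plus a small TF32 correction term and accumulates three TF32 tensor-core
    products instead of one, recovering near-fp32 accuracy. The sketch below is an
    illustration only, not the CUTLASS implementation (the real conversion rounds to nearest
    rather than truncating, and the function names here are made up):

    ```
    import numpy as np

    def to_tf32(x):
        # Keep TF32's 10 mantissa bits by zeroing the low 13 mantissa bits of an fp32 value.
        i = np.asarray(x, dtype=np.float32).view(np.int32)
        return (i & np.int32(-(1 << 13))).view(np.float32)

    def mul_3xtf32(a, b):
        a_big = to_tf32(a)
        a_small = to_tf32(np.float32(a) - a_big)
        b_big = to_tf32(b)
        b_small = to_tf32(np.float32(b) - b_big)
        # Three TF32 products accumulated in fp32; the tiny a_small * b_small term is dropped.
        return a_big * b_big + a_big * b_small + a_small * b_big
    ```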
*/ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/convolution.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta) using ElementInputA = float; // Data type of elements in input tensor using ElementInputB = float; // Data type of elements in input tensor using ElementOutput = float; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 3; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in linear combination // 3xTF32 Fprop using Conv2dFpropKernel_3xTF32 = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, // Only thing needs to be changed from normal Fprop cutlass::arch::OpMultiplyAddFastF32, IteratorAlgorithm >::Kernel; // 1xTF32 Fprop using Conv2dFpropKernel_1xTF32 = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm >::Kernel; using ImplicitGemm_3xTF32 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel_3xTF32>; using ImplicitGemm_1xTF32 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel_1xTF32>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), iterations(20), save_workspace(false), alpha(1), beta(0), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
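    // (In this example the tensors hold 32-bit floats, so a 128-bit vectorized access covers
    // 4 elements; that is why kAlignment is 4 below rather than the 8 elements quoted above
    // for cutlass::half_t.)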
// int const kAlignment = 4; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Updates input and filter sizes void update( cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) { this->input_size = input_size; this->filter_size = filter_size; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "28_ampere_3xtf32_fast_accurate_tensorop_fprop example\n\n" << " This example uses Ampere's Tensor Core operators on F16 data types to compute\n" << " forward convolution on tensors of layout NHWC.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --n=<int> Input tensor extent N\n" << " --h=<int> Input tensor extent H\n" << " --w=<int> Input tensor extent W\n" << " --c=<int> Input tensor extent C\n" << " --k=<int> Filter extent K\n" << " --r=<int> Filter extent R\n" << " --s=<int> Filter extent S\n\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/28_ampere_3xtf32_fast_accurate_tensorop_fprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n" << "$ ./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/28_ampere_3xtf32_fast_accurate_tensorop_fprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord( input_size.n(), (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * 
filter_size.w() * filter_size.c()); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; double l2_norm_3xtf32_vs_fp64; double l2_norm_1xtf32_vs_fp64; double l2_norm_fp32_vs_fp64; Result(): runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), error(cudaSuccess), l2_norm_3xtf32_vs_fp64(0), l2_norm_1xtf32_vs_fp64(0), l2_norm_fp32_vs_fp64(0) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << runtime_ms << "," << gflops << "," << l2_norm_3xtf32_vs_fp64 << "," << l2_norm_1xtf32_vs_fp64 << "," << l2_norm_fp32_vs_fp64; return out; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile_convolution(Options const &options) { Result result; //////////////////////////////////////////////////////////////////////////////// /// 1. Initialize F32 Precision input tensors using CUTLASS helper functions //////////////////////////////////////////////////////////////////////////////// // // Allocate host-device tensors using the CUTLASS Utilities. // cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(options.input_size); cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(options.filter_size); cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(options.output_size()); cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(options.output_size()); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a_F32.host_view(), 1, ElementInputA(7), ElementInputA(-8)); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b_F32.host_view(), 1, ElementInputB(7), ElementInputB(-8)); // Fill tensor C on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_F32.host_view(), 1, ElementInputB(7), ElementInputB(-8)); // Fill tensor D on host with zeros cutlass::reference::host::TensorFill( tensor_d_F32.host_view()); // Copy data from host to GPU tensor_a_F32.sync_device(); tensor_b_F32.sync_device(); tensor_c_F32.sync_device(); tensor_d_F32.sync_device(); //////////////////////////////////////////////////////////////////////////////// /// 2. Initialize F32 Precision input tensors using CUTLASS helper functions //////////////////////////////////////////////////////////////////////////////// // // Allocate host-device tensors using the CUTLASS Utilities. 
// cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(options.input_size); cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(options.filter_size); cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(options.output_size()); cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(options.output_size()); cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(options.output_size()); cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(options.output_size()); // Copy values from the DP tensors cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view()); // Copy data from host to GPU tensor_a_F64.sync_device(); tensor_b_F64.sync_device(); tensor_c_F64.sync_device(); tensor_d_F64.sync_device(); tensor_d_3xTF32.sync_device(); tensor_d_1xTF32.sync_device(); // // Define arguments for CUTLASS Convolution // cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split K dimension into 1 partitions int split_k_slices = 1; // Construct Conv2dProblemSize with user defined output size cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices ); //////////////////////////////////////////////////////////////////////////////// /// 3. Run 3xTF32 kernel within a profiling loop //////////////////////////////////////////////////////////////////////////////// // Construct ImplicitGemm::Argument structure with conv2d // problem size, data pointers, and epilogue values typename ImplicitGemm_3xTF32::Arguments arguments_3xTF32{ problem_size, tensor_a_F32.device_ref(), tensor_b_F32.device_ref(), tensor_c_F32.device_ref(), tensor_d_3xTF32.device_ref(), {options.alpha, options.beta}, }; // // Initialize CUTLASS Convolution // ImplicitGemm_3xTF32 implicit_gemm_op_3xTF32; size_t workspace_size_3xTF32 = implicit_gemm_op_3xTF32.get_workspace_size(arguments_3xTF32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_3xTF32(workspace_size_3xTF32); result.status = implicit_gemm_op_3xTF32.can_implement(arguments_3xTF32); CUTLASS_CHECK(result.status); result.status = implicit_gemm_op_3xTF32.initialize(arguments_3xTF32, workspace_3xTF32.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_op_3xTF32(); CUTLASS_CHECK(result.status); // // Performance measurement // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. 
result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm_op_3xTF32(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } tensor_d_3xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /// 4. Run 1xTF32 kernel within a profiling loop //////////////////////////////////////////////////////////////////////////////// // Construct ImplicitGemm::Argument structure with conv2d // problem size, data pointers, and epilogue values typename ImplicitGemm_1xTF32::Arguments arguments_1xTF32{ problem_size, tensor_a_F32.device_ref(), tensor_b_F32.device_ref(), tensor_c_F32.device_ref(), tensor_d_1xTF32.device_ref(), {options.alpha, options.beta}, }; // // Initialize CUTLASS Convolution // ImplicitGemm_1xTF32 implicit_gemm_op_1xTF32; size_t workspace_size_1xTF32 = implicit_gemm_op_1xTF32.get_workspace_size(arguments_1xTF32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_1xTF32(workspace_size_1xTF32); result.status = implicit_gemm_op_1xTF32.can_implement(arguments_1xTF32); CUTLASS_CHECK(result.status); result.status = implicit_gemm_op_1xTF32.initialize(arguments_1xTF32, workspace_1xTF32.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_op_1xTF32(); CUTLASS_CHECK(result.status); tensor_d_1xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F64) //////////////////////////////////////////////////////////////////////////////// cutlass::reference::device::Conv2d< double, LayoutInputA, double, LayoutInputB, double, LayoutOutput, double, double >( cutlass::conv::Operator::kFprop, problem_size, tensor_a_F64.device_ref(), tensor_b_F64.device_ref(), tensor_c_F64.device_ref(), tensor_d_F64.device_ref(), options.alpha, options.beta); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F64.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F32) //////////////////////////////////////////////////////////////////////////////// cutlass::reference::device::Conv2d< float, LayoutInputA, float, LayoutInputB, float, 
LayoutOutput, float, float >( cutlass::conv::Operator::kFprop, problem_size, tensor_a_F32.device_ref(), tensor_b_F32.device_ref(), tensor_c_F32.device_ref(), tensor_d_F32.device_ref(), options.alpha, options.beta); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /////// Compute l2 norms //////////////////////////////////////////////////////////////////////////////// // l2 norm 3xTF32 vs F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(options.output_size()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view()); result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm 1xTF32 vs F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(options.output_size()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view()); result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm F32 vs F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_F32_in_F64(options.output_size()); cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view()); result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view()); /////////////////////////////////////////////////////////////////////////////// if (options.save_workspace) { std::stringstream ss; ss << "28_ampere_3xtf32_fast_accurate_tensorop_fprop_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a_F32.host_view() << "\n\n" << "Filters = \n" << tensor_b_F32.host_view() << "\n\n"; output_workspace << "TF32x3 = \n" << tensor_d_3xTF32.host_view() << std::endl; output_workspace << "TF32x1 = \n" << tensor_d_1xTF32.host_view() << std::endl; output_workspace << "FP32 = \n" << tensor_d_F32.host_view() << std::endl; output_workspace << "FP64 = \n" << tensor_d_F64.host_view() << "\n\n"; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major >= 8)) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." 
<< std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {1, 32, 64, 128, 256}; struct Benchmark { int h, w, c, k, r, s; } layers[] = { {56, 56, 64, 256, 1, 1}, {56, 56, 64, 64, 1, 1}, {56, 56, 64, 64, 3, 3}, {56, 56, 256, 64, 1, 1}, {56, 56, 256, 512, 1, 1}, {56, 56, 256, 128, 1, 1}, {28, 28, 128, 128, 3, 3}, {28, 28, 128, 512, 1, 1}, {28, 28, 512, 128, 1, 1}, {28, 28, 512, 1024, 1, 1}, {28, 28, 512, 256, 1, 1}, {14, 14, 256, 256, 3, 3}, {14, 14, 256, 1024, 1, 1}, {14, 14, 1024, 256, 1, 1}, {14, 14, 1024, 2048, 1, 1}, {14, 14, 1024, 512, 1, 1}, {7, 7, 512, 512, 3, 3}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const &layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}); Result result = profile_convolution(options); result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/ampere_3xtf32_fast_accurate_tensorop_fprop.cu/0
{ "file_path": "cutlass/examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/ampere_3xtf32_fast_accurate_tensorop_fprop.cu", "repo_id": "cutlass", "token_count": 10595 }
7
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** */ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// #include <cmath> #include <iostream> #include <vector> #include <limits> #include "cutlass/cutlass.h" #include "cutlass/arch/memory.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/kernel/default_gemm.h" #include "cutlass/gemm/kernel/default_gemm_complex.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/epilogue/threadblock/epilogue_visitor_with_softmax.h" #include "cutlass/epilogue/threadblock/epilogue_with_visitor.h" #include "cutlass/reduction/kernel/reduce_softmax_final.h" ///////////////////////////////////////////////////////////////////////////////////////////////// #include "gemm_with_epilogue_visitor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// // // Kernel computes partial reduction // // // 2. 
Sum[m, n'] = sum_n(exp(D[m, n] - N[m, 0])) // template < typename ElementD_, typename ElementNorm_, typename ElementSum_, typename ElementSoft_, typename ElementSoftmaxCompute_, int Alignment, typename ApplyShape_ = MatrixShape<1, 1024> > class ApplySoftmax { public: using ElementD = ElementD_; using ElementNorm = ElementNorm_; using ElementSum = ElementSum_; using ElementSoft = ElementSoft_; using ElementSoftmaxCompute = ElementSoftmaxCompute_; static int const kAlignment = Alignment; using ApplyShape = ApplyShape_; using Layout = cutlass::layout::RowMajor; using TensorRefD = TensorRef<ElementD, Layout>; using TensorRefN = TensorRef<ElementNorm, Layout>; using TensorRefSum = TensorRef<ElementSum, Layout>; using TensorRefSoft = TensorRef<ElementSoft, Layout>; using FragmentSoftmax = Array<ElementSoftmaxCompute, kAlignment>; // // Arguments // struct Arguments { MatrixCoord extent; ///< Extent of D and Softmax matrices int batch_count; ///< Batch count TensorRefD ref_D; ///< D matrix computed by GEMM+Max (input) TensorRefN ref_N; ///< Norm tensor (input) TensorRefSum ref_S; ///< Sum tensor (input) TensorRefSoft ref_Soft; ///< Softmax tensor (output) int64_t batch_stride_D; ///< Batch stride for D tensor int64_t batch_stride_N; ///< Batch stride for N tensor int64_t batch_stride_S; ///< Batch stride for S tensor int64_t batch_stride_Soft; ///< Batch stride for softmax tensor // // Methods // Arguments(): batch_count(1), batch_stride_D(0), batch_stride_N(0), batch_stride_S(0), batch_stride_Soft(0) { } Arguments( MatrixCoord extent_, ///< Extent of D and Softmax matrices int batch_count_, ///< Batch count TensorRefD ref_D_, ///< D matrix computed by GEMM+PartialReduce TensorRefN ref_N_, ///< Output parameter for N TensorRefSum ref_S_, ///< Output parameter for N TensorRefSoft ref_Soft_, ///< Softmax int64_t batch_stride_D_ = 0, int64_t batch_stride_N_ = 0, int64_t batch_stride_S_ = 0, int64_t batch_stride_Soft_ = 0 ): extent(extent_), batch_count(batch_count_), ref_D(ref_D_), ref_N(ref_N_), ref_S(ref_S_), ref_Soft(ref_Soft_), batch_stride_D(batch_stride_D_), batch_stride_N(batch_stride_N_), batch_stride_S(batch_stride_S_), batch_stride_Soft(batch_stride_Soft_) { } }; // // Params struct // struct Params { Arguments args; // // Methods // Params() { } Params(Arguments const &args_): args(args_) { } }; // // SharedStorage // struct SharedStorage { }; private: public: CUTLASS_DEVICE ApplySoftmax() { } CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { apply(params, shared_storage); } private: /// Compute Softmax CUTLASS_DEVICE void apply(Params const &params, SharedStorage &shared_storage) { using AccessTypeD = AlignedArray<ElementD, kAlignment>; int block_batch = blockIdx.z; int block_m = blockIdx.x * ApplyShape::kRow; int block_n = 0; int thread_m = threadIdx.y; int thread_n = threadIdx.x * kAlignment; int idx_m = block_m + thread_m; int idx_n = block_n + thread_n; int batch_offset_norm = block_batch * params.args.batch_stride_N; int batch_offset_sum = block_batch * params.args.batch_stride_S; // Kill off thread if it is outside the row boundary if (params.args.extent.row() <= idx_m) { return; } // // Setup pointers to load D again // using AccessTypeD = AlignedArray<ElementD, kAlignment>; using AccessTypeSoft = AlignedArray<ElementSoft, kAlignment>; using FragmentSoft = Array<ElementSoft, kAlignment>; using ConvertSoftCompute = cutlass::NumericArrayConverter<ElementSoftmaxCompute, ElementD, kAlignment>; using ConvertSoftOutput = 
cutlass::NumericArrayConverter<ElementSoft, ElementSoftmaxCompute, kAlignment>; using Mul = cutlass::multiplies<FragmentSoftmax>; using Minus = cutlass::minus<FragmentSoftmax>; using Exp = cutlass::fast_exp_op<FragmentSoftmax>; ConvertSoftCompute convert_soft_compute; ConvertSoftOutput convert_soft_output; Minus minus; Mul mul; Exp exponential; using ConvertSum = cutlass::NumericConverter<ElementSoftmaxCompute, ElementSum>; using ConvertNorm = cutlass::NumericConverter<ElementSoftmaxCompute, ElementNorm>; ConvertSum convert_sum; ConvertNorm convert_norm; AccessTypeD *access_d = reinterpret_cast<AccessTypeD *>( params.args.ref_D.data() + params.args.batch_stride_D * block_batch + params.args.ref_D.layout()({idx_m, idx_n})); AccessTypeSoft *access_soft = reinterpret_cast<AccessTypeSoft *>( params.args.ref_Soft.data() + params.args.batch_stride_Soft * block_batch + params.args.ref_Soft.layout()({idx_m, idx_n})); ElementSum inv_sum = (params.args.ref_S.data())[idx_m + batch_offset_sum]; ElementNorm norm = (params.args.ref_N.data())[idx_m + batch_offset_norm]; // // Loop // CUTLASS_PRAGMA_UNROLL for ( int idx = 0; idx < params.args.extent.column(); idx += ApplyShape::kColumn * kAlignment) { if (idx_n < params.args.extent.column()) { AccessTypeD fetch; arch::global_load<AccessTypeD, sizeof(AccessTypeD)>(fetch, access_d, true); FragmentSoftmax result = mul(exponential(minus(convert_soft_compute(fetch), convert_norm(norm))), convert_sum(inv_sum)); FragmentSoft soft = convert_soft_output(result); arch::global_store<FragmentSoft, sizeof(FragmentSoft)>(soft, access_soft, true); } access_d += ApplyShape::kColumn; access_soft += ApplyShape::kColumn; idx_n += ApplyShape::kColumn * kAlignment; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel ///////////////////////////////////////////////////////////////////////////////////////////////// /// template < typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename ElementCompute_, typename OperatorClass_, typename ArchTag_, typename ThreadblockShape_, typename WarpShape_, typename InstructionShape_, typename EpilogueFunctorOp_, int kStages_, typename ApplyShape_ = MatrixShape<1, 1024>, int AlignmentA_ = 128 / cutlass::sizeof_bits<ElementA_>::value, int AlignmentB_ = 128 / cutlass::sizeof_bits<ElementB_>::value, int AlignmentSoftmax_ = 128 / cutlass::sizeof_bits<ElementC_>::value, typename ElementNorm_ = float, typename ElementSum_ = float, typename ElementSoftmax_ = ElementC_ > class GemmSoftmax { public: /////////////////////////////////////////////////////////////////////////////////////////////// // // Type definitions // using ElementA = ElementA_; using ElementB = ElementB_; using ElementC = ElementC_; using ElementCompute = ElementCompute_; using ElementSum = ElementSum_; using ElementSoft = ElementSoftmax_; using ElementSoftmaxCompute = float; using LayoutA = LayoutA_; using LayoutB = LayoutB_; using EpilogueFunctorOp = EpilogueFunctorOp_; using ElementNorm = ElementNorm_; using ApplyShape = ApplyShape_; // These are mandatory layouts. 
using LayoutC = cutlass::layout::RowMajor; using LayoutN = cutlass::layout::RowMajor; using LayoutS = cutlass::layout::RowMajor; using LayoutSoft = cutlass::layout::RowMajor; using TensorRefA = TensorRef<ElementA, LayoutA>; using TensorRefB = TensorRef<ElementB, LayoutB>; using TensorRefC = TensorRef<ElementC, LayoutC>; using TensorRefN = TensorRef<ElementNorm, LayoutN>; using TensorRefSum = TensorRef<ElementSum, LayoutS>; using TensorRefSoft = TensorRef<ElementSoft, LayoutSoft>; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; static int const kStages = kStages_; static int const AlignmentA = AlignmentA_; static int const AlignmentB = AlignmentB_; static int const AlignmentSoftmax = AlignmentSoftmax_; using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle; /////////////////////////////////////////////////////////////////////////////////////////////// // basic GEMM kernel using DefaultGemmKernel = typename cutlass::gemm::kernel::DefaultGemm< ElementA, LayoutA, AlignmentA, ElementB, LayoutB, AlignmentB, ElementC, LayoutC, ElementCompute, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueFunctorOp, ThreadblockSwizzle, kStages, true, typename cutlass::gemm::device::DefaultGemmConfiguration< OperatorClass, ArchTag, ElementA, ElementB, ElementC, ElementCompute>::Operator, cutlass::gemm::SharedMemoryClearOption::kNone >::GemmKernel; /////////////////////////////////////////////////////////////////////////////////////////////// // Epilogue visitor using EpilogueVisitor = typename cutlass::epilogue::threadblock::EpilogueVisitorSoftmax< ThreadblockShape, DefaultGemmKernel::kThreadCount, typename DefaultGemmKernel::Epilogue::OutputTileIterator, ElementCompute, ElementNorm, ElementSum, ElementSoftmaxCompute, EpilogueFunctorOp >; /// Epilogue using Epilogue = typename cutlass::epilogue::threadblock::EpilogueWithVisitorFromExistingEpilogue< EpilogueVisitor, typename DefaultGemmKernel::Epilogue >::Epilogue; // GEMM using GemmKernel = gemm::kernel::GemmWithEpilogueVisitor< typename DefaultGemmKernel::Mma, Epilogue, ThreadblockSwizzle >; // Softmax kernel using SoftmaxApplyKernel = kernel::ApplySoftmax< ElementC, ElementNorm, ElementSum, ElementSoft, ElementSoftmaxCompute, AlignmentSoftmax, ApplyShape >; using ApplyFinalReductionKernel = cutlass::reduction::kernel::ApplySoftmaxFinalReduction< ElementNorm, ElementSum, ElementSoftmaxCompute, ThreadblockShape >; public: /// Arguments class struct Arguments { typename GemmKernel::Arguments gemm; typename SoftmaxApplyKernel::Arguments softmax; typename ApplyFinalReductionKernel::Arguments reduction; cutlass::gemm::GemmCoord extend; // // Methods // Arguments() { } Arguments( cutlass::gemm::GemmCoord problem_size, int32_t batch_count_, TensorRefA ref_A_, TensorRefB ref_B_, TensorRefC ref_C_, TensorRefC ref_D_, typename EpilogueFunctorOp::Params linear_scaling, TensorRefN ref_N_, TensorRefSum ref_S_, TensorRefSoft ref_Softmax_, int64_t batch_stride_A_ = 0, int64_t batch_stride_B_ = 0, int64_t batch_stride_C_ = 0, int64_t batch_stride_D_ = 0, int64_t batch_stride_Max_ = 0, int64_t batch_stride_Sum_ = 0, int64_t batch_stride_Softmax_ = 0 ): gemm( cutlass::gemm::GemmUniversalMode::kBatched, problem_size, batch_count_, ref_A_, ref_B_, ref_C_, ref_D_, ref_N_.data(), ref_S_.data(), batch_stride_A_, batch_stride_B_, typename EpilogueVisitor::Arguments( linear_scaling, 
batch_stride_C_, batch_stride_D_, batch_stride_Max_, batch_stride_Sum_ ) ), reduction( problem_size, ref_N_.data(), ref_S_.data(), batch_stride_Max_, batch_stride_Sum_ ), softmax( MatrixCoord(problem_size.m(), problem_size.n()), batch_count_, ref_D_, ref_N_, ref_S_, ref_Softmax_, batch_stride_D_, batch_stride_Max_, batch_stride_Sum_, batch_stride_Softmax_ ), extend(problem_size) { } }; struct Params { typename GemmKernel::Params gemm; typename SoftmaxApplyKernel::Params softmax; typename ApplyFinalReductionKernel::Params reduction; MatrixCoord extend; // // Methods // Params() { } Params(Arguments const &args): gemm(args.gemm), reduction(args.reduction), softmax(args.softmax), extend(MatrixCoord(args.extend.m(), args.extend.n())) { } }; public: // Gemm // // Methods // private: Params params_; public: /// Ctor GemmSoftmax() { } /// Initialize Status initialize(Arguments const &args) { params_ = Params(args); return cutlass::Status::kSuccess; } /// Run Status run(cudaStream_t stream) { // // Launch the GEMM + max kernel // dim3 gemm_grid = ThreadblockSwizzle().get_grid_shape(params_.gemm.grid_tiled_shape); dim3 gemm_block(GemmKernel::kThreadCount, 1, 1); int gemm_smem_size = int(sizeof(typename GemmKernel::SharedStorage)); cudaError_t result; if (gemm_smem_size >= (48 << 10)) { result = cudaFuncSetAttribute(cutlass::Kernel<GemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, gemm_smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } cutlass::Kernel<GemmKernel><<<gemm_grid, gemm_block, gemm_smem_size, stream>>>(params_.gemm); result = cudaGetLastError(); if (result != cudaSuccess) { return cutlass::Status::kErrorInternal; } // // Launch the ApplyFinalReductionKernel // int thread_per_block = 128; int block_per_row = (params_.extend.row() + thread_per_block - 1) / thread_per_block; if (block_per_row < 4) { thread_per_block = 32; block_per_row = (params_.extend.row() + thread_per_block - 1) / thread_per_block; } dim3 final_reduction_grid(block_per_row, 1, params_.softmax.args.batch_count); dim3 final_reduction_block(thread_per_block); Kernel<ApplyFinalReductionKernel><<< final_reduction_grid, final_reduction_block, sizeof(typename ApplyFinalReductionKernel::SharedStorage), stream >>>(params_.reduction); result = cudaGetLastError(); if (result != cudaSuccess) { return cutlass::Status::kErrorInternal; } // // Launch the SoftmaxApplyKernel // dim3 apply_block(SoftmaxApplyKernel::ApplyShape::kColumn, SoftmaxApplyKernel::ApplyShape::kRow); int threadblock_rows = SoftmaxApplyKernel::ApplyShape::kRow; int threadblock_columns = SoftmaxApplyKernel::ApplyShape::kColumn * SoftmaxApplyKernel::kAlignment; dim3 apply_grid( (params_.softmax.args.extent.row() + threadblock_rows - 1) / threadblock_rows, (params_.softmax.args.extent.column() + threadblock_columns - 1) / threadblock_columns, params_.softmax.args.batch_count); Kernel<SoftmaxApplyKernel><<< apply_grid, apply_block, sizeof(typename SoftmaxApplyKernel::SharedStorage), stream >>>(params_.softmax); result = cudaGetLastError(); if (result != cudaSuccess) { return cutlass::Status::kErrorInternal; } return cutlass::Status::kSuccess; } /// Function call operator Status operator()(cudaStream_t stream = nullptr) { return run(stream); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
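// Putting it together: GemmSoftmax::run() above launches three kernels in sequence on the given
// stream:
//   1. the GEMM, whose epilogue visitor (EpilogueVisitorSoftmax) writes D and produces per-row
//      running maxima (N) and partial sums of exp(D - N),
//   2. ApplySoftmaxFinalReduction, which reduces those partial maxima and sums, and
//   3. ApplySoftmax, which re-reads D and writes Softmax[m, n] = exp(D[m, n] - N[m, 0]) scaled by
//      the normalization factor read from S (named inv_sum in the kernel).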
cutlass/examples/35_gemm_softmax/gemm_with_softmax.h/0
{ "file_path": "cutlass/examples/35_gemm_softmax/gemm_with_softmax.h", "repo_id": "cutlass", "token_count": 7230 }
8
################################################################################ # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ import sys print("This example is deprecated. 
Please see examples/python for examples of using " "the CUTLASS Python interface.") sys.exit(0) import numpy as np import cutlass.backend as pycutlass from cutlass.backend import * from cutlass.backend.utils.device import device_cc from cutlass.backend.conv2d_operation import * from cutlass.backend.utils.reference_model import Conv2dReferenceModule import torch.nn.functional as F import argparse # parse the arguments parser = argparse.ArgumentParser(description="Launch CUTLASS convolution 2d kernels from Python") # Operation description # math instruction description parser.add_argument("-i", "--instruction_shape", default=[1, 1, 1], nargs=3, type=int, help="This option describes the size of MMA op") parser.add_argument("-ta", "--element_a", default="float32", type=str, choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'], help='Data type of elements in input tensor A') parser.add_argument("-tb", "--element_b", default="float32", type=str, choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'], help='Data type of elements in input tensor B') parser.add_argument("-tc", "--element_c", default="float32", type=str, choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'], help='Data type of elements in input tensor C and output tensor D') parser.add_argument("-tacc", "--element_acc", default="float32", type=str, choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'], help='Data type of accumulator') parser.add_argument('-m', "--math", default="multiply_add", type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction") parser.add_argument('-op', "--opcode", default="Simt", type=str, choices=["Simt", 'TensorOp'], help='This option describes whether you want to use tensor \ cores (TensorOp) or regular SIMT cores (Simt) on GPU SM') # tile description parser.add_argument("-b", "--threadblock_shape", default=[128, 128, 8], nargs=3, type=int, help="This option describes the tile size a thread block with compute") parser.add_argument("-s", "--stages", default=4, type=int, help="Number of pipelines you want to use") parser.add_argument("-w", "--warp_count", default=[ 4, 2, 1], nargs=3, type=int, help="This option describes the number of warps along M, N, and K of the threadblock") parser.add_argument("-cc", "--compute_capability", default=80, type=int, help="This option describes CUDA SM architecture number") # A parser.add_argument('-la', "--layout_a", default="TensorNHWC", type=str, choices=[ "TensorNHWC", "TensorNC32HW32"], help="Memory layout of input tensor A") parser.add_argument('-aa', '--alignment_a', default=1, type=int, help="Memory alignement of input tensor A") # B parser.add_argument('-lb', "--layout_b", default="TensorNHWC", type=str, choices=[ "TensorNHWC", "TensorC32RSK32"], help="Memory layout of input tensor B") parser.add_argument('-ab', '--alignment_b', default=1, type=int, help="Memory alignment of input tensor B") # C parser.add_argument('-lc', "--layout_c", default="TensorNHWC", type=str, choices=[ "TensorNHWC", "TensorNC32HW32"], help="Memory layout of input tensor C and output tensor D") parser.add_argument('-ac', '--alignment_c', default=1, type=int, help="Memory alignment of input tensor C and output tensor D") # epilogue parser.add_argument("-te", "--element_epilogue", default="float32", type=str, choices=['float64', 'float32', 'float16', 'bfloat16'], help='Data type of computation in the epilogue') parser.add_argument("-ep", "--epilogue_functor", 
default="LinearCombination", type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'], help="This option describes the epilogue part of the kernel") # swizzling parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[ "IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8", "HorizontalSwizzle", "StridedDgradIdentitySwizzle1", "StridedDgradIdentitySwizzle4", "StridedDgradHorizontalSwizzle"], help="This option describes how thread blocks are scheduled on GPU") # conv related parser.add_argument("-co", "--conv_kind", default="fprop", type=str, choices=['fprop', 'dgrad', 'wgrad'], help="The type of convolution: forward propagation (fprop), \ gradient of activation (dgrad), gradient of weight (wgrad)") parser.add_argument("-st", "--stride_support", default="Strided", type=str, choices=["Strided", "Unity"], ) parser.add_argument("-ia", "--iterator_algorithm", default="analytic", type=str, choices=["analytic", "optimized", "fixed_channels", "few_channels"], help="This option describes iterator algorithm") # arguments parser.add_argument("-sm", "--split_k_mode", default="Serial", type=str, choices=["Serial", "Parallel"], help="Split K Mode. Serial is used for non-splitK or serial-splitK.\ Parallel is used for parallel splitK.") parser.add_argument('-k', '--split_k_slices', default=1, type=int, help="Number of split-k partitions. (default 1)") parser.add_argument("-nhwc", "--nhwc", nargs=4, type=int, help="input size (NHWC)") parser.add_argument("-krsc", "--krsc", nargs=4, type=int, help="filter size (KRSC)") parser.add_argument("-pad", "--pad", nargs=4, type=int, help="padding (pad_h, _, pad_w, _)") parser.add_argument("-stride", "--stride", nargs=2, type=int, help="stride (stride_h, stride_w)") parser.add_argument("-dilation", "--dilation", nargs=2, type=int, help="dilation (dilation_h, dilation_w)") parser.add_argument("-alpha", "--alpha", default=1.0, type=float, help="alpha") parser.add_argument("-beta", "--beta", default=0.0, type=float, help="beta") parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector") # Activation function parser.add_argument("-activ", "--activation_function", default="identity", choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function") parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float, help="addition arguments for activation") parser.add_argument('--print_cuda', action="store_true", help="print the underlying CUDA kernel") try: args = parser.parse_args() except: sys.exit(0) cc = device_cc() if args.compute_capability != cc: raise Exception(("Parameter --compute-capability of {} " "does not match that of the device of {}.").format(args.compute_capability, cc)) pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32) np.random.seed(0) element_a = getattr(cutlass_bindings, args.element_a) element_b = getattr(cutlass_bindings, args.element_b) element_c = getattr(cutlass_bindings, args.element_c) element_acc = getattr(cutlass_bindings, args.element_acc) math_operation = getattr(MathOperation, args.math) opclass = getattr(cutlass_bindings.OpClass, args.opcode) math_inst = MathInstruction( args.instruction_shape, element_a, element_b, element_acc, opclass, math_operation ) tile_description = TileDescription( args.threadblock_shape, args.stages, args.warp_count, math_inst ) layout_a = getattr(cutlass_bindings, args.layout_a) layout_b = 
getattr(cutlass_bindings, args.layout_b) layout_c = getattr(cutlass_bindings, args.layout_c) A = TensorDescription( element_a, layout_a, args.alignment_a ) B = TensorDescription( element_b, layout_b, args.alignment_b ) C = TensorDescription( element_c, layout_c, args.alignment_c ) element_epilogue = getattr(cutlass_bindings, args.element_epilogue) if (args.activation_function == "identity" or (args.split_k_mode == "Parallel" and args.split_k_slices > 1)): # epilogue_functor = getattr(pycutlass, args.epilogue_functor)( C.element, C.alignment, math_inst.element_accumulator, element_epilogue) else: epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")( getattr(pycutlass, args.activation_function)(element_epilogue), C.element, C.alignment, math_inst.element_accumulator, element_epilogue) iterator_algorithm = getattr(cutlass_bindings.conv.IteratorAlgorithm, args.iterator_algorithm) swizzling_functor = getattr(cutlass_bindings, args.swizzling_functor) stride_support = getattr(StrideSupport, args.stride_support) conv_kind = getattr(cutlass_bindings.conv.Operator, args.conv_kind) operation = Conv2dOperation( conv_kind=conv_kind, iterator_algorithm=iterator_algorithm, arch=args.compute_capability, tile_description=tile_description, A=A, B=B, C=C, stride_support=stride_support, epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor ) if args.print_cuda: print(operation.rt_module.emit()) operations = [operation,] if args.split_k_mode == "Parallel" and args.split_k_slices > 1: if (args.activation_function == "identity"): epilogue_functor_reduction = getattr(pycutlass, args.epilogue_functor)( C.element, C.alignment, math_inst.element_accumulator, element_epilogue) else: epilogue_functor_reduction = getattr(pycutlass, "LinearCombinationGeneric")( getattr(pycutlass, args.activation_function)(element_epilogue), C.element, C.alignment, math_inst.element_accumulator, element_epilogue) reduction_operation = ReductionOperation( shape=cutlass_bindings.MatrixCoord(4, 32 * C.alignment), C=C, element_accumulator=element_acc, element_compute=element_epilogue, epilogue_functor=epilogue_functor_reduction, count=C.alignment ) operations.append(reduction_operation) pycutlass.compiler.add_module(operations) problem_size = cutlass_bindings.conv.Conv2dProblemSize( cutlass_bindings.Tensor4DCoord(args.nhwc[0], args.nhwc[1], args.nhwc[2], args.nhwc[3]), cutlass_bindings.Tensor4DCoord(args.krsc[0], args.krsc[1], args.krsc[2], args.krsc[3]), cutlass_bindings.Tensor4DCoord(args.pad[0], args.pad[1], args.pad[2], args.pad[3]), cutlass_bindings.MatrixCoord(args.stride[0], args.stride[1]), cutlass_bindings.MatrixCoord(args.dilation[0], args.dilation[1]), cutlass_bindings.conv.Mode.cross_correlation, args.split_k_slices, 1 ) # User-provide inputs tensor_A_size = cutlass_bindings.conv.implicit_gemm_tensor_a_size( conv_kind, problem_size ) tensor_B_size = cutlass_bindings.conv.implicit_gemm_tensor_b_size( conv_kind, problem_size ) if args.bias: tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_extent( conv_kind, problem_size ).at(3) else: tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size( conv_kind, problem_size ) tensor_D_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size( conv_kind, problem_size ) if args.element_a != "int8": tensor_A = torch.ceil(torch.empty(size=(tensor_A_size,), dtype=getattr(torch, args.element_a), device="cuda").uniform_(-8.5, 7.5)) else: tensor_A = torch.empty(size=(tensor_A_size,), dtype=getattr(torch, args.element_a), device="cuda").uniform_(-2, 
2) if args.element_b != "int8": tensor_B = torch.ceil(torch.empty(size=(tensor_B_size,), dtype=getattr(torch, args.element_b), device="cuda").uniform_(-8.5, 7.5)) else: tensor_B = torch.empty(size=(tensor_B_size,), dtype=getattr(torch, args.element_b), device="cuda").uniform_(-2, 2) if args.element_c != "int8": tensor_C = torch.ceil(torch.empty(size=(tensor_C_size,), dtype=getattr(torch, args.element_c), device="cuda").uniform_(-8.5, 7.5)) else: tensor_C = torch.empty(size=(tensor_C_size,), dtype=getattr(torch, args.element_c), device="cuda").uniform_(-2, 2) tensor_D = torch.ones(size=(tensor_D_size,), dtype=getattr(torch, args.element_c), device="cuda") arguments = Conv2dArguments( operation=operation, problem_size=problem_size, A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D, output_op = operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)), split_k_mode=getattr(cutlass_bindings.conv.SplitKMode, args.split_k_mode), split_k_slices=problem_size.split_k_slices ) if args.split_k_mode == "Parallel" and args.split_k_slices > 1: implicit_gemm_size = cutlass_bindings.conv.implicit_gemm_problem_size(conv_kind, arguments.problem_size) reduction_arguments = ReductionArguments( reduction_operation, problem_size=[implicit_gemm_size.m(), implicit_gemm_size.n()], partitions=problem_size.split_k_slices, workspace=arguments.ptr_D, destination=tensor_D, source=tensor_C, output_op = reduction_operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)), bias = arguments.bias ) operation.run(arguments) if args.split_k_mode == "Parallel" and args.split_k_slices > 1: reduction_operation.run(reduction_arguments) reduction_arguments.sync() else: arguments.sync() reference_model = Conv2dReferenceModule(A, B, C, conv_kind) tensor_D_ref = reference_model.run(tensor_A, tensor_B, tensor_C, arguments.problem_size, args.alpha, args.beta, args.bias) if (args.activation_function != "identity"): tensor_D_ref = getattr(F, args.activation_function)(*([tensor_D_ref,] + args.activation_args)) try: assert torch.equal(tensor_D, tensor_D_ref) except: assert torch.allclose(tensor_D, tensor_D_ref, rtol=1e-2) print("Passed.")
cutlass/examples/40_cutlass_py/customizable/conv2d.py/0
{ "file_path": "cutlass/examples/40_cutlass_py/customizable/conv2d.py", "repo_id": "cutlass", "token_count": 6379 }
9
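For reference, the tensor sizes that the conv2d.py example above obtains from `cutlass_bindings.conv.implicit_gemm_tensor_{a,b,c}_size` follow the standard implicit-GEMM view of a convolution. The sketch below recomputes that mapping for the fprop case only, assuming symmetric padding, unit groups, and cross-correlation mode; the helper name `implicit_gemm_fprop_dims` is illustrative and is not part of the bindings.

```python
# Illustrative recomputation of the implicit-GEMM dimensions for a fprop conv.
# Assumes symmetric padding (pad_h, pad_w), cross-correlation, groups == 1.
def implicit_gemm_fprop_dims(nhwc, krsc, pad_hw, stride_hw, dilation_hw):
    n, h, w, c = nhwc
    k, r, s, c_filter = krsc
    assert c == c_filter, "activation and filter channel counts must match"
    pad_h, pad_w = pad_hw
    stride_h, stride_w = stride_hw
    dil_h, dil_w = dilation_hw

    # Output spatial extent of the convolution
    p = (h + 2 * pad_h - dil_h * (r - 1) - 1) // stride_h + 1
    q = (w + 2 * pad_w - dil_w * (s - 1) - 1) // stride_w + 1

    # Logical GEMM extents for forward propagation:
    #   A is the im2col view of the activation, B the filter, C/D the output.
    gemm_m = n * p * q
    gemm_n = k
    gemm_k = r * s * c
    return (p, q), (gemm_m, gemm_n, gemm_k)


if __name__ == "__main__":
    # e.g. a 1x32x32x64 activation with 128 3x3x64 filters, pad 1, stride 1
    (p, q), (m, n, k) = implicit_gemm_fprop_dims(
        (1, 32, 32, 64), (128, 3, 3, 64), (1, 1), (1, 1), (1, 1))
    print(f"output P x Q = {p} x {q}; implicit GEMM (M, N, K) = ({m}, {n}, {k})")
```

The printed (M, N, K) is the logical problem the kernel iterates over; the `implicit_gemm_tensor_*_size` calls in the example return the footprints of the underlying NHWC/KRSC/NPQK tensors rather than these logical extents.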
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief CUTLASS Attention Example. This workload computes a fused multi head attention. Because it keeps the attention matrix in shared memory, it's both faster and uses less global memory. This is based on `"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_, and very similar to `"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness" <https://arxiv.org/abs/2205.14135>`_. Algorithm: In short, we can compute the output incrementally in blocks of size B, we just need to divide the final result by the sum of all coefficients in the softmax (which we compute incrementally) with the following pseudo-code: ``` s_prime = torch.zeros([num_queries, B]) O = torch.zeros([num_queries, head_size_v]) for i in range(0, K.shape[0], B): si = exp((Q . K[i * B:(i+1) * B].t) * scale) sum_coefs += attn_unscaled.sum(-1) O += si . V[i * B:(i+1) * B] O = O / s_prime ``` In practice, and for numerical stability reasons, we also substract the maximum so far (`mi`) before doing the exponential. When we encounter new keys, the maximum used to compute O so far (`m_prime`) can differ from the current maximum, so we update O before accumulating with ``` O = O * exp(m_prime - mi) m_prime = mi ``` Implementation details: - `si` is stored in shared memory between the 2 back to back gemms - we keep and accumulate the output directly in registers if we can (`head_size_v <= 128`). 
Otherwise, we store it & accumulate in global memory (slower) - blocks are parallelized across the batch dimension, the number of heads, and the query sequence size Examples: # Run an attention example with default setup $ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen # Run an attention example with custom setup $ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen --head_number=2 --batch_size=3 --head_size=32 --head_size_v=64 --seq_length=512 --seq_length_kv=1024 --causal=true Acknowledgement: Fixed-sequence-length FMHA code was upstreamed by Meta xFormers (https://github.com/facebookresearch/xformers). */ ///////////////////////////////////////////////////////////////////////////////////////////////// #include <vector> #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/gemm_grouped.h" #include "cutlass/gemm/kernel/default_gemm_grouped.h" #include "cutlass/gemm/device/gemm_grouped.h" #include "cutlass/gemm/device/gemm_universal.h" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm_complex.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/kernel/gemm_grouped.h" #include "cutlass/gemm/kernel/gemm_transpose_operands.h" #include "cutlass/gemm/kernel/default_gemm.h" #include "cutlass/gemm/kernel/default_gemm_complex.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/threadblock/epilogue_with_visitor.h" #include "cutlass/fast_math.h" #include "kernel_forward.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; // // Methods // Result( double runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess ): runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; bool error; bool reference_check; bool use_mask; bool causal; std::vector<cutlass::gemm::GemmCoord> problem_sizes0; std::vector<cutlass::gemm::GemmCoord> problem_sizes1; std::vector<cutlass::gemm::GemmCoord> problem_sizes0_real; std::vector<cutlass::gemm::GemmCoord> problem_sizes1_real; int alignment; int head_number; int batch_size; int head_size; int head_size_v; int seq_length; int seq_length_kv; int iterations; // alpha0, alpha1 and beta are fixed // in this multi-head attention example float alpha0; float alpha1; float beta; // // Methods // Options(): help(false), error(false), alignment(1), reference_check(true), head_number(12), batch_size(16), head_size(64), head_size_v(64), seq_length(1024), seq_length_kv(1024), use_mask(false), iterations(20), causal(false) { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if 
(cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("alignment", alignment, 1); cmd.get_cmd_line_argument("head_number", head_number, 12); cmd.get_cmd_line_argument("batch_size", batch_size, 16); cmd.get_cmd_line_argument("head_size", head_size, 64); cmd.get_cmd_line_argument("head_size_v", head_size_v, head_size); cmd.get_cmd_line_argument("seq_length", seq_length, 1024); cmd.get_cmd_line_argument("seq_length_kv", seq_length_kv, seq_length); cmd.get_cmd_line_argument("use_mask", use_mask, false); cmd.get_cmd_line_argument("iterations", iterations, 20); cmd.get_cmd_line_argument("reference-check", reference_check, true); cmd.get_cmd_line_argument("causal", causal, true); randomize_problems(); } void randomize_problems() { int problem_count = head_number * batch_size; problem_sizes0.reserve(problem_count); problem_sizes1.reserve(problem_count); // When using mask, the original inputs are not padded // and we need to save these info. if (use_mask) { problem_sizes0_real.reserve(problem_count); problem_sizes1_real.reserve(problem_count); } for (int i = 0; i < batch_size; ++i) { // problems belonging to the same batch share the same seq len int m_real = seq_length; int mkv_real = seq_length_kv; int m = (m_real + alignment - 1) / alignment * alignment; int mkv = (mkv_real + alignment - 1) / alignment * alignment; int k0 = head_size; int k1 = head_size_v; for (int j = 0; j < head_number; ++j) { cutlass::gemm::GemmCoord problem0(m, mkv, k0); cutlass::gemm::GemmCoord problem1(m, k1, mkv); problem_sizes0.push_back(problem0); problem_sizes1.push_back(problem1); if (use_mask) { cutlass::gemm::GemmCoord problem0_real(m_real, mkv_real, k0); cutlass::gemm::GemmCoord problem1_real(m_real, k1, mkv_real); problem_sizes0_real.push_back(problem0_real); problem_sizes1_real.push_back(problem1_real); } } } } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "41_fused_multi_head_attention_fixed_seqlen\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --head_number=<int> Head number in multi-head attention (default: --head_number=12)\n" << " --batch_size=<int> Batch size in multi-head attention (default: --batch_size=16)\n" << " --head_size=<int> Head size in multi-head attention (default: --head_size=64)\n" << " --head_size_v=<int> Head size in multi-head attention for V (default: --head_size_v=head_size)\n" << " --seq_length=<int> Sequence length in multi-head attention for Q (default: --seq_length=1024)\n" << " --seq_length_kv=<int> Sequence length in multi-head attention for K/V (default: --seq_length_kv=seq_length)\n" << " --use_mask=<bool> If true, performs padding-like masking in softmax.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --reference-check=<bool> If true, performs reference check.\n" << " --causal=<bool> If true, uses causal masking.\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fops = int64_t(); for (size_t i = 0; i < problem_sizes0.size(); ++i) { auto const& problem0 = problem_sizes0[i]; auto const& problem1 = problem_sizes1[i]; for (int row = 0; row < problem0.m(); ++row) { int num_cols0 = problem0.n(); if (causal) { num_cols0 = std::min(row + 1, num_cols0); } // P <- Q . K_t fops += 2 * num_cols0 * problem0.k(); // P <- exp(P - max(P)) fops += 2 * num_cols0; // S <- sum(P) fops += num_cols0 - 1; // O <- P . 
V fops += 2 * num_cols0 * problem1.n(); // O <- O / S fops += num_cols0 * problem1.n(); } } return double(fops) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Attention> class TestbedAttention { public: // // Type definitions // using ElementQ = typename Attention::scalar_t; using ElementK = typename Attention::scalar_t; using ElementP = typename Attention::accum_t; using ElementAccumulator = typename Attention::accum_t; using ElementV = typename Attention::scalar_t; using ElementO = typename Attention::output_t; using ElementCompute = typename Attention::accum_t; using ElementNorm = typename Attention::accum_t; using ElementSum = typename Attention::accum_t; using ElementSoftmaxCompute = typename Attention::accum_t; using LayoutQ = cutlass::layout::RowMajor; using LayoutK = cutlass::layout::ColumnMajor; using LayoutP = cutlass::layout::RowMajor; using LayoutV = cutlass::layout::RowMajor; using LayoutO = cutlass::layout::RowMajor; using MatrixCoord = typename LayoutP::TensorCoord; private: // // Data members // Options & options; /// Initialization cutlass::Distribution::Kind init_Q; cutlass::Distribution::Kind init_K; cutlass::Distribution::Kind init_P; cutlass::Distribution::Kind init_V; cutlass::Distribution::Kind init_O; uint32_t seed; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device1; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0_real; std::vector<int64_t> offset_Q; std::vector<int64_t> offset_K; std::vector<int64_t> offset_P; std::vector<int64_t> offset_V; std::vector<int64_t> offset_O; std::vector<int64_t> ldq_host; std::vector<int64_t> ldk_host; std::vector<int64_t> ldp_host; std::vector<int64_t> ldv_host; std::vector<int64_t> ldo_host; std::vector<int64_t> seqlen_host; cutlass::DeviceAllocation<int64_t> ldq; cutlass::DeviceAllocation<int64_t> ldk; cutlass::DeviceAllocation<int64_t> ldp; cutlass::DeviceAllocation<int64_t> ldv; cutlass::DeviceAllocation<int64_t> ldo; cutlass::DeviceAllocation<int64_t> seqlen; cutlass::DeviceAllocation<ElementQ> block_Q; cutlass::DeviceAllocation<ElementK> block_K; cutlass::DeviceAllocation<ElementP> block_P; cutlass::DeviceAllocation<ElementV> block_V; cutlass::DeviceAllocation<ElementO> block_O; cutlass::DeviceAllocation<ElementNorm> block_Norm; cutlass::DeviceAllocation<ElementSum> block_Sum; cutlass::DeviceAllocation<int64_t> offset_P_Device; cutlass::DeviceAllocation<ElementQ *> ptr_Q; cutlass::DeviceAllocation<ElementK *> ptr_K; cutlass::DeviceAllocation<ElementP *> ptr_P; cutlass::DeviceAllocation<ElementV *> ptr_V; cutlass::DeviceAllocation<ElementO *> ptr_O; public: // // Methods // TestbedAttention( Options &options_, cutlass::Distribution::Kind init_Q_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_K_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_P_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_V_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_O_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): options(options_), init_Q(init_Q_), init_K(init_K_), init_P(init_P_), init_V(init_V_), init_O(init_O_), seed(seed_) { } int problem_count() const { return (options.head_number * options.batch_size); } private: /// Helper to initialize a tensor view template <typename Element> void initialize_tensor_( Element *ptr, size_t capacity, 
cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { Element scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<ElementP>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 8; scope_min = -8; } else { scope_max = 8; scope_min = -8; } cutlass::reference::device::BlockFillRandomUniform( ptr, capacity, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::device::BlockFillRandomGaussian( ptr, capacity, seed, Element(), Element(0.5f)); } else if (dist_kind == cutlass::Distribution::Sequential) { // Fill with increasing elements cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(1), Element()); } else { // Fill with all 1s cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(), Element(1)); } } /// Initializes data structures void initialize_() { // // Set scalors for the mha example // options.alpha0 = 1.0f / sqrt(float(options.head_size)); options.alpha1 = 1.0f; options.beta = 0; // // Choose random problem sizes // // construct a few problems of random sizes srand(seed); int64_t total_elements_Q = 0; int64_t total_elements_K = 0; int64_t total_elements_P = 0; int64_t total_elements_V = 0; int64_t total_elements_O = 0; ldq_host.resize(problem_count()); ldk_host.resize(problem_count()); ldp_host.resize(problem_count()); ldv_host.resize(problem_count()); ldo_host.resize(problem_count()); seqlen_host.resize(problem_count()); // Create tensors in BMHK format, where // B = batch_size // M = sequence length // H = num_heads // K = embedding size per head int64_t batch_offset_Q, batch_offset_K, batch_offset_V, batch_offset_O; for (int32_t b = 0; b < options.batch_size; ++b) { batch_offset_Q = total_elements_Q; batch_offset_K = total_elements_K; batch_offset_V = total_elements_V; batch_offset_O = total_elements_O; for (int32_t h = 0; h < options.head_number; ++h) { int32_t i = h + b * options.head_number; auto problem0 = options.problem_sizes0.at(i); auto problem1 = options.problem_sizes1.at(i); ldq_host.at(i) = LayoutQ::packed({problem0.m(), options.head_number * problem0.k()}).stride(0); ldk_host.at(i) = LayoutK::packed({options.head_number * problem0.k(), problem0.n()}).stride(0); ldp_host.at(i) = LayoutP::packed({problem0.m(), problem0.n()}).stride(0); ldv_host.at(i) = LayoutV::packed({problem1.k(), options.head_number * problem1.n()}).stride(0); ldo_host.at(i) = LayoutO::packed({problem1.m(), options.head_number * problem1.n()}).stride(0); // m = n for attention problems. 
seqlen_host.at(i) = problem0.m(); offset_Q.push_back(batch_offset_Q + h * problem0.k()); offset_K.push_back(batch_offset_K + h * problem0.k()); offset_P.push_back(total_elements_P); offset_V.push_back(batch_offset_V + h * problem0.k()); offset_O.push_back(batch_offset_O + h * problem1.n()); int64_t elements_Q = problem0.m() * problem0.k(); int64_t elements_K = problem0.k() * problem0.n(); int64_t elements_P = problem0.m() * problem0.n(); int64_t elements_V = problem1.k() * problem1.n(); int64_t elements_O = problem1.m() * problem1.n(); total_elements_Q += elements_Q; total_elements_K += elements_K; total_elements_P += elements_P; total_elements_V += elements_V; total_elements_O += elements_O; } } problem_sizes_device0.reset(problem_count()); problem_sizes_device1.reset(problem_count()); problem_sizes_device0.copy_from_host(options.problem_sizes0.data()); problem_sizes_device1.copy_from_host(options.problem_sizes1.data()); if (options.use_mask) { problem_sizes_device0_real.reset(problem_count()); problem_sizes_device0_real.copy_from_host(options.problem_sizes0_real.data()); } ldq.reset(problem_count()); ldk.reset(problem_count()); ldp.reset(problem_count()); ldv.reset(problem_count()); ldo.reset(problem_count()); seqlen.reset(problem_count()); ldq.copy_from_host(ldq_host.data()); ldk.copy_from_host(ldk_host.data()); ldp.copy_from_host(ldp_host.data()); ldv.copy_from_host(ldv_host.data()); ldo.copy_from_host(ldo_host.data()); seqlen.copy_from_host(seqlen_host.data()); // // Assign pointers // block_Q.reset(total_elements_Q); block_K.reset(total_elements_K); block_P.reset(total_elements_P); block_V.reset(total_elements_V); block_O.reset(total_elements_O); offset_P_Device.reset(problem_count()); // sync offset with device cutlass::device_memory::copy_to_device(offset_P_Device.get(), offset_P.data(), offset_P.size()); std::vector<ElementQ *> ptr_Q_host(problem_count()); std::vector<ElementK *> ptr_K_host(problem_count()); std::vector<ElementP *> ptr_P_host(problem_count()); std::vector<ElementV *> ptr_V_host(problem_count()); std::vector<ElementO *> ptr_O_host(problem_count()); std::vector<ElementNorm *> ptr_norm_host(problem_count()); std::vector<ElementSum *> ptr_sum_host(problem_count()); for (int32_t i = 0; i < problem_count(); ++i) { ptr_Q_host.at(i) = block_Q.get() + offset_Q.at(i); ptr_K_host.at(i) = block_K.get() + offset_K.at(i); ptr_P_host.at(i) = block_P.get() + offset_P.at(i); ptr_V_host.at(i) = block_V.get() + offset_V.at(i); ptr_O_host.at(i) = block_O.get() + offset_O.at(i); } ptr_Q.reset(problem_count()); ptr_Q.copy_from_host(ptr_Q_host.data()); ptr_K.reset(problem_count()); ptr_K.copy_from_host(ptr_K_host.data()); ptr_P.reset(problem_count()); ptr_P.copy_from_host(ptr_P_host.data()); ptr_V.reset(problem_count()); ptr_V.copy_from_host(ptr_V_host.data()); ptr_O.reset(problem_count()); ptr_O.copy_from_host(ptr_O_host.data()); // // Initialize the problems of the workspace // initialize_tensor_(block_Q.get(), total_elements_Q, init_Q, seed + 1); initialize_tensor_(block_K.get(), total_elements_K, init_K, seed + 2); initialize_tensor_(block_V.get(), total_elements_V, init_V, seed + 3); } template<typename Element> bool verify_tensor_(std::vector<Element> vector_Input, \ std::vector<Element> vector_Input_Ref, int64_t verify_length = -1) { int64_t size = (vector_Input.size() < vector_Input_Ref.size()) ? vector_Input.size() : vector_Input_Ref.size(); size = (verify_length == -1) ? 
size : verify_length; // 0.05 for absolute error float abs_tol = 5e-2f; // 10% for relative error float rel_tol = 1e-1f; for (int64_t i = 0; i < size; ++i) { float diff = (float)(vector_Input.at(i) - vector_Input_Ref.at(i)); float abs_diff = fabs(diff); float abs_ref = fabs((float)vector_Input_Ref.at(i) + 1e-5f); float relative_diff = abs_diff / abs_ref; if ( (isnan(vector_Input_Ref.at(i)) || isnan(abs_diff) || isinf(abs_diff)) || (abs_diff > abs_tol && relative_diff > rel_tol)) { printf("[%d/%d] diff = %f, rel_diff = %f, {computed=%f, ref=%f}.\n", int(i), int(size), abs_diff, relative_diff, (float)(vector_Input.at(i)), (float)(vector_Input_Ref.at(i))); return false; } } return true; } /// Verifies the result is a GEMM bool verify_() { bool passed = true; for (int32_t b = 0; b < options.batch_size; ++b) { int32_t i = b * options.head_number; // Problem size is the same for all heads cutlass::gemm::GemmCoord problem0 = options.problem_sizes0.at(b * options.head_number); cutlass::gemm::GemmCoord problem1 = options.problem_sizes1.at(b * options.head_number); MatrixCoord extent_Q{problem0.m(), problem0.k()}; MatrixCoord extent_K{problem0.k(), problem0.n()}; MatrixCoord extent_P{problem0.m(), problem0.n()}; MatrixCoord extent_V{problem1.k(), problem1.n()}; MatrixCoord extent_O{problem1.m(), problem1.n()}; LayoutO layout_O(ldo_host.at(i)); std::vector<ElementO> matrix_O(layout_O.capacity(extent_O)); cutlass::device_memory::copy_to_host(matrix_O.data(), block_O.get() + offset_O.at(i), matrix_O.size()); cutlass::DeviceAllocation<ElementO> block_Ref_O(layout_O.capacity(extent_O)); for (int32_t h = 0; h < options.head_number; ++h) { i = h + b * options.head_number; LayoutQ layout_Q(ldq_host.at(i)); LayoutK layout_K(ldk_host.at(i)); LayoutP layout_P(ldp_host.at(i)); LayoutV layout_V(ldv_host.at(i)); cutlass::TensorView<ElementQ, LayoutQ> view_Q(block_Q.get() + offset_Q.at(i), layout_Q, extent_Q); cutlass::TensorView<ElementK, LayoutK> view_K(block_K.get() + offset_K.at(i), layout_K, extent_K); cutlass::TensorView<ElementV, LayoutV> view_V(block_V.get() + offset_V.at(i), layout_V, extent_V); cutlass::TensorView<ElementO, LayoutO> view_Ref_O_device(block_Ref_O.get() + offset_O.at(i) - offset_O.at(b * options.head_number), layout_O, extent_O); cutlass::DeviceAllocation<ElementP> block_Ref_P(layout_P.capacity(extent_P)); cutlass::TensorView<ElementP, LayoutP> view_Ref_P_device(block_Ref_P.get(), layout_P, extent_P); // Reference GEMM cutlass::reference::device::GemmComplex< ElementQ, LayoutQ, ElementK, LayoutK, ElementP, LayoutP, ElementCompute, ElementAccumulator >( problem0, ElementAccumulator(options.alpha0), view_Q, Attention::MM0::Mma::kTransformA, view_K, Attention::MM0::Mma::kTransformB, ElementAccumulator(options.beta), view_Ref_P_device, view_Ref_P_device, ElementAccumulator(0) ); // Compute softmax for P. We need to explicitly compute softmax // over P because softmax is fused to the second GEMM in the // profiled implementation. std::vector<ElementP> matrix_Ref(layout_P.capacity(extent_P)); cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref_P.get(), matrix_Ref.size()); cutlass::TensorView<ElementP, LayoutP> view_Ref_host(matrix_Ref.data(), layout_P, extent_P); std::vector<ElementNorm> vector_Norm_Ref(problem0.m()); std::vector<ElementSum> vector_Sum_Ref(problem0.m()); int n_dim = options.use_mask ? 
options.problem_sizes0_real.at(i).n() : problem0.n(); // Compute softmax for reference matrix for (int m = 0; m < problem0.m(); m++) { int n_dim_row = n_dim; if (options.causal) { n_dim_row = std::min(m + 1, n_dim); } ElementSoftmaxCompute max = ElementSoftmaxCompute(view_Ref_host.ref().at({m, 0})); for (int n = 1; n < n_dim_row; n++) { max = std::max(max, ElementSoftmaxCompute(view_Ref_host.ref().at({m, n}))); } vector_Norm_Ref.at(m) = ElementNorm(max); ElementSoftmaxCompute sum = ElementSoftmaxCompute(); for (int n = 0; n < n_dim_row; n++) { sum += std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ); } ElementSoftmaxCompute inv_sum = ElementSoftmaxCompute(1.0f / sum); vector_Sum_Ref.at(m) = ElementSum(inv_sum); for (int n = 0; n < n_dim_row; n++) { view_Ref_host.ref().at({m, n}) = ElementP( std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ) * inv_sum ); } // Mask out the rest of the attention matrix for (int n = n_dim_row; n < n_dim; ++n) { view_Ref_host.ref().at({m, n}) = ElementP(0); } } // when not using mask, problem_real and problem share the same sizes if (options.use_mask) { for (int m = 0; m < problem0.m(); m++) { for (int n = n_dim; n < problem0.n(); n++) { view_Ref_host.ref().at({m, n}) = ElementP(0); } } } cutlass::device_memory::copy_to_device(block_Ref_P.get(), matrix_Ref.data(), matrix_Ref.size()); // Reference GEMM cutlass::reference::device::GemmComplex< ElementP, LayoutP, ElementV, LayoutV, ElementO, LayoutO, ElementCompute, ElementAccumulator >( problem1, ElementAccumulator(options.alpha1), view_Ref_P_device, Attention::MM0::Mma::kTransformA, view_V, Attention::MM0::Mma::kTransformB, ElementAccumulator(options.beta), view_Ref_O_device, view_Ref_O_device, ElementAccumulator(0) ); } // Copy to host memory std::vector<ElementO> matrix_Ref_O(layout_O.capacity(extent_O)); cutlass::device_memory::copy_to_host(matrix_Ref_O.data(), block_Ref_O.get(), matrix_Ref_O.size()); // printf("Pb %d: \n Q=(offset=%d, ldq=%d)\n K=(offset=%d, ldk=%d)\n O=(offset=%d, ldo=%d)\n", // int(i), int(offset_Q[i]), int(ldq_host[i]), int(offset_K[i]), int(ldk_host[i]), int(offset_O[i]), int(ldo_host[i])); bool verified_O = false; if (!verified_O) { verified_O = verify_tensor_<ElementO>(matrix_O, matrix_Ref_O); } passed = passed && verified_O; if (!passed) { std::cerr << "\n***\nError - problem " << i << " (batch " << b << ") failed the QA check\n***\n" << std::endl; if (!verified_O) { std::cout << "Final matrix output is incorrect" << std::endl; } return passed; } } return passed; } public: /// Executes a CUTLASS Attention kernel and measures runtime. 
Result profile() { Result result; result.passed = false; // Initialize the problem initialize_(); typename Attention::Params p; { // set parameters p.query_ptr = block_Q.get(); p.key_ptr = block_K.get(); p.value_ptr = block_V.get(); p.logsumexp_ptr = nullptr; // Only needed for bw p.output_accum_ptr = nullptr; if (Attention::kNeedsOutputAccumulatorBuffer) { cudaMalloc(&p.output_accum_ptr, block_O.size() * sizeof(typename Attention::output_accum_t)); } p.output_ptr = block_O.get(); // TODO: support arbitrary seq lengths // if (cu_seqlens_q.has_value()) { // p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr(); // p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr(); // } p.scale = options.alpha0; p.num_heads = options.head_number; p.num_batches = options.batch_size; p.head_dim = options.head_size; p.head_dim_value = options.head_size_v; p.num_queries = options.seq_length; p.num_keys = options.seq_length_kv; if (options.causal) { p.custom_mask_type = Attention::CausalFromTopLeft; } // All tensors are in BMHK shapes p.q_strideH = options.head_size; p.k_strideH = options.head_size; p.v_strideH = options.head_size_v; p.q_strideM = int32_t(ldq_host[0]); p.k_strideM = int32_t(ldk_host[0]); p.v_strideM = int32_t(ldv_host[0]); p.q_strideB = p.q_strideM * options.seq_length; p.k_strideB = p.k_strideM * options.seq_length_kv; p.v_strideB = p.v_strideM * options.seq_length_kv; p.o_strideM = p.head_dim_value * p.num_heads; } // launch kernel :) constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>; int smem_bytes = sizeof(typename Attention::SharedStorage); if (smem_bytes > 0xc000) { cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes); } if (!Attention::check_supported(p)) { std::cerr << "Kernel does not support these inputs" << std::endl; return result; } kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p); // Wait for completion result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // // Verify correctness // result.passed = true; if (options.reference_check) { result.passed = verify_(); } // // Warm-up run // kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS Attention kernel." << std::endl; return result; } // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // Record an event at the start of a series of GEMM operations result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p); } // // Stop profiling loop // // Record an event when the GEMM operations have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. 
result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // // Cleanup // for (auto event : events) { (void)cudaEventDestroy(event); } std::cout << std::endl; std::cout << "CUTLASS Attention:\n" << "====================================================" << std::endl; std::cout << " " << " {seq length Q, seq length KV, head size, head size V, head number, batch size} = {" << options.seq_length \ << ", " << options.seq_length_kv << ", " << options.head_size << ", " << options.head_size_v << ", " << options.head_number\ << ", " << options.batch_size << "}." << std::endl; std::cout << std::endl; std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " " << "GFLOPs: " << result.gflops << std::endl; return result; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template < int kQueriesPerBlock, int kKeysPerBlock, int kMaxK > int run_attention(Options& options) { using Attention = AttentionKernel< cutlass::half_t, // scalar_t cutlass::arch::Sm80, // ArchTag true, // Memory is aligned kQueriesPerBlock, kKeysPerBlock, kMaxK, false, // Supports dropout false // Supports bias >; // // Test and profile // TestbedAttention<Attention> testbed(options); Result result = testbed.profile(); if (!result.passed) { std::cout << "Profiling CUTLASS attention has failed.\n"; std::cout << "\nFailed\n"; return -1; } std::cout << "\nPassed\n"; return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // // This example uses mma.sync to directly access Tensor Cores to achieve peak performance. // cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) { // // This example requires an NVIDIA Ampere-architecture GPU. // std::cout << "CUTLASS's CUTLASS Attention example requires a GPU of NVIDIA's Ampere Architecture or " << "later (compute capability 80 or greater).\n"; return 0; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.error) { std::cerr << "Aborting execution." << std::endl; return -1; } if (options.use_mask) { std::cerr << "--use_mask is not supported at the moment\n"; return -2; } if (options.alignment != 1) { std::cerr << "--alignment=1 is the only supported value\n"; return -2; } // Determine kernel configuration based on head size. // If head size is less than or equal to 64, each block operates over 64 queries and // 64 keys, and partial results can be stored in the register file. // If head size is greater than 64, each block operates over 32 queries and 128 keys, // and partial results are stored in shared memory. 
  if (options.head_size_v > 64) {
    static int const kQueriesPerBlock = 32;
    static int const kKeysPerBlock = 128;
    if (options.head_size_v <= 128) {
      return run_attention<kQueriesPerBlock, kKeysPerBlock, 128>(options);
    }
    else {
      return run_attention<kQueriesPerBlock, kKeysPerBlock, 65536>(options);
    }
  }
  else {
    static constexpr int kMaxK = 64; // <- Decrease to 32/16 if your problem is smaller
    static int const kQueriesPerBlock = 64;
    static int const kKeysPerBlock = 64;
    return run_attention<kQueriesPerBlock, kKeysPerBlock, kMaxK>(options);
  }
}

/////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/41_fused_multi_head_attention/fused_multihead_attention_fixed_seqlen.cu/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/fused_multihead_attention_fixed_seqlen.cu", "repo_id": "cutlass", "token_count": 15573 }
10
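The incremental-softmax scheme described in the header comment of the attention example above (running row maximum `m_prime`/`mi`, running coefficient sum `s_prime`, and the `O *= exp(m_prime - mi)` rescaling) can be sanity-checked on the host with a few lines of NumPy. The sketch below is only a numerical illustration of that algorithm against a naive softmax-attention reference; the function names are ours and nothing here reflects the CUDA kernel's actual tiling or data layout.

```python
import numpy as np

def attention_blocked(Q, K, V, B=64):
    """Blocked, numerically stable attention following the pseudo-code in the
    example's header comment (host-side illustration, not the CUDA kernel)."""
    num_queries, head_size = Q.shape
    scale = 1.0 / np.sqrt(head_size)
    O = np.zeros((num_queries, V.shape[1]))
    s_prime = np.zeros(num_queries)           # running sum of softmax coefficients
    m_prime = np.full(num_queries, -np.inf)   # running row-wise maximum
    for i in range(0, K.shape[0], B):
        sij = (Q @ K[i:i + B].T) * scale
        mi = np.maximum(m_prime, sij.max(axis=1))
        # Rescale what was accumulated under the old maximum: O *= exp(m_prime - mi)
        correction = np.exp(m_prime - mi)
        O *= correction[:, None]
        s_prime *= correction
        pij = np.exp(sij - mi[:, None])
        s_prime += pij.sum(axis=1)
        O += pij @ V[i:i + B]
        m_prime = mi
    return O / s_prime[:, None]

def attention_reference(Q, K, V):
    s = (Q @ K.T) / np.sqrt(Q.shape[1])
    p = np.exp(s - s.max(axis=1, keepdims=True))
    return (p / p.sum(axis=1, keepdims=True)) @ V

rng = np.random.default_rng(0)
Q = rng.standard_normal((128, 64))
K = rng.standard_normal((256, 64))
V = rng.standard_normal((256, 64))
assert np.allclose(attention_blocked(Q, K, V, B=32), attention_reference(Q, K, V))
print("blocked attention matches the naive reference")
```

The assertion checks only the rescaling identities of the blocked formulation; causal masking, padding masks, and the BMHK strides handled by the example are deliberately left out.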
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Inspired from "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h" Loads tiles of GEMM operands from a RowMajor shared-memory layout into registers to use by A100 TensorCores. The difference with "mma_tensor_op_tile_access_iterator.h" is that: (1) We use "ldmatrix" to load tiles, rather than manual loads (slightly faster) (2) We support to transpose the operand (eg read `A.transpose()` when the shared memory holds `A`) This is only implemented for the specific shapes. 
*/ #pragma once #include <cutlass/gemm/gemm.h> //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { template < /// Operand identity Operand Operand_, /// Data type of A elements typename Element_, typename InstructionShape_, bool kTranspose = false> class WarpIteratorFromSmem { public: /// Shape of tile to load (concept: MatrixShape) using Shape = cutlass::MatrixShape<32, 32>; /// Operand tag static Operand const kOperand = Operand_; static_assert( kOperand == Operand::kA, "No support for OperandB at the moment"); /// Basic check static_assert( kOperand == Operand::kA || kOperand == Operand::kB, "WarpIteratorFromSmem may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; static_assert(sizeof_bits<Element>::value == 16, "Only supported for half"); /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; static_assert(InstructionShape::kRow == 16, "Only supports 16x8x8 / 16x8x16"); static_assert( InstructionShape::kColumn == 8 || InstructionShape::kColumn == 16, "Only supports 16x8x8 / 16x8x16"); /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = 1; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Number of elements accessed per Shared Memory load static int const kElementsPerAccess = (sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value); using InstructionCount = MatrixShape< Shape::kRow / InstructionShape::kRow, Shape::kColumn / InstructionShape::kColumn>; static int const kIterations = (kOperand == Operand::kA) ? InstructionCount::kColumn : InstructionCount::kRow; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array< Element, (kOperand == Operand::kA) ? (Shape::kRow* InstructionShape::kColumn / kThreads) : (Shape::kColumn* InstructionShape::kRow / kThreads)>; /// Memory access type // using AccessType = AlignedArray<Element, kElementsPerAccess>; using AccessType = Array<unsigned, 4>; static int constexpr kWarpShapeDivisibleInner = (kOperand == Operand::kA ? 
InstructionShape::kColumn : InstructionShape::kRow); static int constexpr kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4; // Number of 32bits tiles to load per `ldmatrix` static int const kTilesPerInstruction = InstructionShape::kRow / 8; static_assert(kTilesPerInstruction == 2, "Only supports 16x8x16 and 16x8x8"); private: /// Underlying tensor reference TensorRef ref_; /// Origin MatrixCoord origin_; /// Iterations in a tile int iterations_; public: /// Constructor from TensorRef CUTLASS_HOST_DEVICE WarpIteratorFromSmem(TensorRef const& ref, int lane_id) : WarpIteratorFromSmem(ref, {Shape::kRow, Shape::kColumn}, lane_id) {} CUTLASS_HOST_DEVICE WarpIteratorFromSmem(TensorRef const& ref, TensorCoord extent, int lane_id) : ref_(ref), iterations_(0) { // See also: // https://docs.nvidia.com/cuda/archive/11.7.1/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-1688 // 16x8x8: kAccessesInner = 1 (1 ldmatrix.x4) // 16x8x16: kAccessesInner = 2 (2 ldmatrix.x4) int ldsm_vec_num = (lane_id >> 3); if (kOperand == Operand::kA) { origin_ = MatrixCoord(lane_id % 8, 0); static_assert( InstructionCount::kRow * kTilesPerInstruction == 4, "can't use ldmatrix.x4"); int access_m_idx = ldsm_vec_num % kTilesPerInstruction; int inner_idx = (ldsm_vec_num / kTilesPerInstruction) % kAccessesInner; int inst_m_idx = ldsm_vec_num / (kTilesPerInstruction * kAccessesInner); MatrixCoord offset( access_m_idx * 8 + inst_m_idx * InstructionShape::kRow, inner_idx * 4 * kElementsPerAccess); if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); } origin_ += offset; } else { // Note: This is not tested or used origin_ = MatrixCoord(0, lane_id % 8); static_assert(InstructionCount::kColumn * kAccessesInner == 4, ""); CUTLASS_PRAGMA_UNROLL for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) { CUTLASS_PRAGMA_UNROLL for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) { int access_idx = inner_idx + kAccessesInner * inst_n_idx; MatrixCoord offset( inner_idx * 4 * kElementsPerAccess, inst_n_idx * 8); if (access_idx == ldsm_vec_num) { if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); } origin_ += offset; } } } } ref_.add_coord_offset(origin_); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE WarpIteratorFromSmem& add_tile_offset(TensorCoord const& tile_offset) { TensorCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); if (kTranspose) { coord_offset = TensorCoord{coord_offset.column(), coord_offset.row()}; } origin_ += coord_offset; ref_.add_coord_offset(coord_offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE void advance() { if (kOperand == Operand::kA) { add_tile_offset({0, 1}); } else { add_tile_offset({1, 0}); } iterations_ = 0; } /// increase iterations in a tile CUTLASS_HOST_DEVICE WarpIteratorFromSmem& operator++() { iterations_++; if (iterations_ >= kIterations) advance(); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_DEVICE void load(Fragment& frag) const { AccessType* access_ptr = reinterpret_cast<AccessType*>(&frag); using LoadLayout = typename platform:: conditional<kTranspose, layout::ColumnMajor, layout::RowMajor>::type; CUTLASS_PRAGMA_UNROLL for (int access_m_idx = 0; access_m_idx < (InstructionCount::kRow * kTilesPerInstruction * kAccessesInner) / 4; ++access_m_idx) { MatrixCoord offset; if (kOperand == Operand::kA) { offset = MatrixCoord( access_m_idx * 16, iterations_ * InstructionShape::kColumn); } else { offset = MatrixCoord(iterations_ * InstructionShape::kRow, 0); } if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); } cutlass::arch::ldsm<LoadLayout, 4>( access_ptr[access_m_idx], ref_.data() + ref_.offset(offset)); } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/examples/41_fused_multi_head_attention/iterators/warp_iterator_from_smem.h/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/iterators/warp_iterator_from_smem.h", "repo_id": "cutlass", "token_count": 3593 }
11
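To make the constructor arithmetic of `WarpIteratorFromSmem` easier to follow, the sketch below replays the operand-A lane-to-coordinate mapping on the host for all 32 lanes, assuming 16-bit elements and the 16x8x16 instruction shape (so `kElementsPerAccess = 2`, `kAccessesInner = 2`, `kTilesPerInstruction = 2`). It only mirrors the index math shown above and is not a substitute for the iterator itself.

```python
# Replays, on the host, the lane -> shared-memory coordinate math from the
# operand-A branch of the WarpIteratorFromSmem constructor. Assumes 16-bit
# elements (two elements per 32-bit access) and InstructionShape 16x8x16.
def lane_origin_operand_a(lane_id, instruction_k=16, transpose=False):
    elements_per_access = 2                       # 32-bit access / 16-bit element
    accesses_inner = (instruction_k // elements_per_access) // 4
    tiles_per_instruction = 16 // 8               # InstructionShape::kRow / 8
    row, col = lane_id % 8, 0                     # origin_ before the ldmatrix offset
    ldsm_vec_num = lane_id >> 3
    access_m_idx = ldsm_vec_num % tiles_per_instruction
    inner_idx = (ldsm_vec_num // tiles_per_instruction) % accesses_inner
    inst_m_idx = ldsm_vec_num // (tiles_per_instruction * accesses_inner)
    off_row = access_m_idx * 8 + inst_m_idx * 16
    off_col = inner_idx * 4 * elements_per_access
    if transpose:                                 # kTranspose swaps only the offset
        off_row, off_col = off_col, off_row
    return row + off_row, col + off_col

# Lanes 0-7, 8-15, 16-23, 24-31 supply the row addresses of the four 8x8
# (16-bit) tiles read by a single ldmatrix.x4; lane_id % 8 selects the row.
for lane in range(32):
    print(f"lane {lane:2d} -> smem coordinate {lane_origin_operand_a(lane)}")
```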
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/numeric_conversion.h" namespace cutlass { namespace gemm { namespace warp { //////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Size of the accumulation tile shape (concept: MatrixShape) typename AccumulatorShape_, /// KBlocks columns to compute residual int KBlocksColumn_, /// Accumulator Element type typename ElementAccumulator_, /// Element type typename Element_, /// Layout of operand in memory typename Layout_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Whether beta is zero bool IsBetaZero_ > class MmaTensorOpPureFragmentIterator; // Partial specialization for col-major accumulator tile // And Element type is the same as Accumulator Element type template < /// Shape of warp tile to load (concept: MatrixShape) typename Shape_, /// Shape of the warp accumulation tile (concept: MatrixShape) typename AccumulatorShape_, /// KBlocks columns to compute residual int KBlocksColumn_, /// Element type typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_> class MmaTensorOpPureFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, Element_, Element_, cutlass::layout::ColumnMajor, InstructionShape_, true> { public: /// Shape of warp tile to load (concept: MatrixShape) using Shape = Shape_; /// Shape of the warp accumulation tile (concept: MatrixShape) using AccumulatorShape = AccumulatorShape_; /// KBlocks columns to 
compute residual static int const kKBlockColumn = KBlocksColumn_; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::ColumnMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Whether beta is zero static bool const IsBetaZero = true; /// Number of participating threads static int const kThreads = 32; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kRow % InstructionShape::kM) && !(Shape::kColumn % InstructionShape::kN), "Shape of warp-level Mma must be divisible by operator shape."); static_assert( !(AccumulatorShape::kRow % Shape::kRow) && !(AccumulatorShape::kColumn % Shape::kColumn), "Shape of Warp Accumulator must be divisible by warp shape."); static_assert( !(kKBlockColumn % Shape::kColumn), "KBlock size must be divisible by warp shape."); /// Number of times this iterator can be incremented static int const kIterations = AccumulatorShape::kCount / Shape::kCount; }; private: static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads; /// Number of mma operations performed by a warp using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM, Shape::kColumn / InstructionShape::kN>; /// Number of mma operations performed by the entire accumulator using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM, AccumulatorShape::kColumn / InstructionShape::kN>; /// Number of K iterations static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn; static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn; static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn * (AccumulatorShape::kRow / Shape::kRow); static int const kResidualIndex = kResidualColumn / Shape::kColumn * (AccumulatorShape::kRow / Shape::kRow); public: // // Derived quantities // /// Fragment object holding a thread's part of a tile /// This is the fragment size produced by one access of the iterator. 
using Fragment = Array<Element, Shape::kCount / kThreads>; /// Accumulator Fragment object using AccumulatorFragment = Array<Element, AccumulatorShape::kCount / kThreads>; private: /// Internal access type using AccessType = Array<Element, kElementsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; /// Used to access residual tile first bool is_residual_tile_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE MmaTensorOpPureFragmentIterator(AccumulatorFragment const &accum) : accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0), is_residual_tile_(true) {} /// Add offset CUTLASS_HOST_DEVICE void add_offset(int index_offset) { index_ += index_offset; if(is_residual_tile_ && index_ >= kKBlockColumnIterations) { index_ = index_ - kKBlockColumnIterations + kResidualIndex; is_residual_tile_ = false; } } /// Increments CUTLASS_HOST_DEVICE MmaTensorOpPureFragmentIterator &operator++() { add_offset(1); return *this; } /// Decrements CUTLASS_HOST_DEVICE MmaTensorOpPureFragmentIterator &operator--() { add_offset(-1); return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag) const { AccessType src_fragment; src_fragment.clear(); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); int index_m = (index_ * MmaIterations::kRow) % AccumulatorIterations::kRow; int index_n = (index_ * MmaIterations::kRow) / AccumulatorIterations::kRow * MmaIterations::kColumn; CUTLASS_PRAGMA_UNROLL for (int n = 0; n < MmaIterations::kColumn; n++) { for (int m = 0; m < MmaIterations::kRow; m++) { int accumulator_access_offset = (n + index_n) * AccumulatorIterations::kRow + m + index_m; frag_ptr[n * MmaIterations::kRow + m].clear(); if(!(is_residual_tile_ && index_ >= kResidualIndex)) frag_ptr[n * MmaIterations::kRow + m] = accumulators_[accumulator_access_offset]; // frag_ptr[n * MmaIterations::kRow + m] = output_op(accumulators_[accumulator_access_offset], src_fragment); } } } }; // Partial specialization for row-major accumulator tile template < /// Shape of warp tile to load (concept: MatrixShape) typename Shape_, /// Shape of the warp accumulation tile (concept: MatrixShape) typename AccumulatorShape_, /// KBlocks columns to compute residual int KBlocksColumn_, /// Accumulator Element type typename ElementAccumulator_, /// Element type typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_> class MmaTensorOpPureFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_, cutlass::layout::RowMajor, InstructionShape_, true> { public: /// Shape of warp tile to load (concept: MatrixShape) using Shape = Shape_; /// Shape of the warp accumulation tile (concept: MatrixShape) using AccumulatorShape = AccumulatorShape_; /// KBlocks columns to compute residual static int const kKBlockColumn = KBlocksColumn_; /// Accumulator Element type using ElementAccumulator = ElementAccumulator_; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Whether beta is zero static bool const IsBetaZero = true; /// Number of participating threads static int const kThreads = 32; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kRow % InstructionShape::kM) 
&& !(Shape::kColumn % InstructionShape::kN), "Shape of warp-level Mma must be divisible by operator shape."); static_assert( !(AccumulatorShape::kRow % Shape::kRow) && !(AccumulatorShape::kColumn % Shape::kColumn), "Shape of Warp Accumulator must be divisible by warp shape."); static_assert( !(kKBlockColumn % Shape::kColumn), "KBlock size must be divisible by warp shape."); /// Number of times this iterator can be incremented static int const kIterations = AccumulatorShape::kCount / Shape::kCount; }; private: static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads; /// Number of mma operations performed by a warp using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM, Shape::kColumn / InstructionShape::kN>; /// Number of mma operations performed by the entire accumulator using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM, AccumulatorShape::kColumn / InstructionShape::kN>; /// Number of K iterations static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn; static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn; static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn * (AccumulatorShape::kRow / Shape::kRow); static int const kResidualIndex = kResidualColumn / Shape::kColumn * (AccumulatorShape::kRow / Shape::kRow); public: // // Derived quantities // /// Fragment object holding a thread's part of a tile /// This is the fragment size produced by one access of the iterator. using Fragment = Array<Element, Shape::kCount / kThreads>; /// Accumulator Fragment object using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>; private: /// Internal access type using AccessType = Array<ElementAccumulator, kElementsPerAccess>; using FragmentAccessType = Array<Element, kElementsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; /// Used to access residual tile first bool is_residual_tile_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE MmaTensorOpPureFragmentIterator(AccumulatorFragment const &accum) : accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0), is_residual_tile_(true) {} /// Add offset CUTLASS_HOST_DEVICE void add_offset(int index_offset) { index_ += index_offset; if(is_residual_tile_ && index_ >= kKBlockColumnIterations) { index_ = index_ - kKBlockColumnIterations + kResidualIndex; is_residual_tile_ = false; } } /// Increments CUTLASS_HOST_DEVICE MmaTensorOpPureFragmentIterator &operator++() { add_offset(1); return *this; } /// Decrements CUTLASS_HOST_DEVICE MmaTensorOpPureFragmentIterator &operator--() { add_offset(-1); return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag) const { FragmentAccessType src_fragment; src_fragment.clear(); FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag); int index_m = (index_ * MmaIterations::kRow) % AccumulatorIterations::kRow; int index_n = (index_ * MmaIterations::kRow) / AccumulatorIterations::kRow * MmaIterations::kColumn; CUTLASS_PRAGMA_UNROLL for (int m = 0; m < MmaIterations::kRow; m++) { for (int n = 0; n < MmaIterations::kColumn; n++) { int accumulator_access_offset = (m + index_m) * AccumulatorIterations::kColumn + n + index_n; frag_ptr[m * MmaIterations::kColumn + n].clear(); if(!(is_residual_tile_ && index_ >= 
kResidualIndex)) frag_ptr[m * MmaIterations::kColumn + n] = (accumulators_[accumulator_access_offset]); } } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
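// ---------------------------------------------------------------------------------------------
// Standalone illustration (not part of the original header): a host-side model of the
// residual-tile bookkeeping performed by add_offset() and load() above. The extents below are
// assumptions chosen only to make the walk concrete -- Shape::kColumn = 64,
// AccumulatorShape::kColumn = 192, AccumulatorShape::kRow == Shape::kRow, kKBlockColumn = 128 --
// and they satisfy the Policy static_asserts.
// ---------------------------------------------------------------------------------------------
#include <cstdio>

int main() {
  int const kShapeColumn   = 64;   // Shape::kColumn
  int const kAccumColumn   = 192;  // AccumulatorShape::kColumn
  int const kAccumRowTiles = 1;    // AccumulatorShape::kRow / Shape::kRow
  int const kKBlockColumn  = 128;  // KBlocksColumn_

  // Mirrors the static constants computed by the iterator.
  int const kKBlockIterations       = (kAccumColumn + kKBlockColumn - 1) / kKBlockColumn;      // 2
  int const kResidualColumn         = kAccumColumn - (kKBlockIterations - 1) * kKBlockColumn;  // 64
  int const kKBlockColumnIterations = kKBlockColumn / kShapeColumn * kAccumRowTiles;           // 2
  int const kResidualIndex          = kResidualColumn / kShapeColumn * kAccumRowTiles;         // 1

  // Walk the iterator the way operator++ would: the residual K block is visited first,
  // its out-of-range steps are zero-filled by load(), then the index is rebased.
  int  index            = 0;
  bool is_residual_tile = true;
  for (int step = 0; step < kKBlockIterations * kKBlockColumnIterations; ++step) {
    bool zero_fill = is_residual_tile && index >= kResidualIndex;  // load() clears the fragment
    std::printf("step %d: index %d%s\n", step, index, zero_fill ? " (zero-filled)" : "");
    ++index;                                                       // add_offset(1)
    if (is_residual_tile && index >= kKBlockColumnIterations) {
      index            = index - kKBlockColumnIterations + kResidualIndex;
      is_residual_tile = false;
    }
  }
  // Three steps load real accumulator fragments and one is zero-filled, matching
  // Policy::kIterations = AccumulatorShape::kCount / Shape::kCount = 3 for these extents.
  return 0;
}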
cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/gemm/warp/mma_tensor_op_fragment_iterator_without_output_op.h/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/gemm/warp/mma_tensor_op_fragment_iterator_without_output_op.h", "repo_id": "cutlass", "token_count": 5122 }
12
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Simple Hopper GEMM example using CUTLASS 3.0 APIs for NVIDIA Hopper architecture This example demonstrate a simple way to instantiate and run a TF32 GEMM using the new CUTLASS 3.0 APIs on NVIDIA Hopper architecture. New features that will be showcased in this example are as follows: 1. NVIDIA Hopper architecture introduces a new series of tensor core instructions (GMMA) which are more efficient than the Ampere tensor core instructions. 2. NVIDIA Hopper architecture includes new Tensor Memory Accelerator (TMA) unit to transfer large blocks of data efficiently between global memory and shared memory. TMA also supports asynchronous copies between thread blocks in a cluster. Another advantage is that TMA can load in FP32 data and convert them implicitly to TF32. 3. This example uses the Warp Specialized kernel design (see /media/docs/efficient_gemm.md for details). 
Examples: $ ./examples/48_hopper_warp_specialized_gemm/48_hopper_warp_specialized_gemm --m=2048 --n=2048 --k=2048 */ #include <iostream> #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/collective/default_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/packed_stride.hpp" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "helper.h" using namespace cute; #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM kernel configurations ///////////////////////////////////////////////////////////////////////////////////////////////// // A matrix configuration using ElementA = float; // Element type for A matrix operand using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes) // B matrix configuration using ElementB = float; // Element type for B matrix operand using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes) // C/D matrix configuration using ElementC = float; // Element type for C and D matrix operands using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrix in units of elements (up to 16 bytes) // Core kernel configurations using ElementAccumulator = float; // Element type for internal accumulation using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag using TileShape = Shape<_128,_128,_32>; // Threadblock-level tile size using ClusterShape = Shape<_1,_2,_1>; // Shape of the threadblocks in a cluster using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto; // Kernel to launch based on the default setting in the Collective Builder using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape, ClusterShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementAccumulator, ElementAccumulator, ElementC, LayoutC, AlignmentC, ElementC, LayoutC, AlignmentC, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< ArchTag, OperatorClass, ElementA, LayoutA, AlignmentA, ElementB, LayoutB, AlignmentB, 
ElementAccumulator, TileShape, ClusterShape, cutlass::gemm::collective::StageCountAutoCarveout< static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int>, // Indicates ProblemShape CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; // Reference device GEMM implementation type using DeviceGemmReference = cutlass::reference::device::Gemm< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, ElementAccumulator>; using StrideA = typename Gemm::GemmKernel::StrideA; using StrideB = typename Gemm::GemmKernel::StrideB; using StrideC = typename Gemm::GemmKernel::StrideC; using StrideD = typename Gemm::GemmKernel::StrideD; // // Data members // /// Initialization StrideA stride_A; StrideB stride_B; StrideC stride_C; StrideD stride_D; uint64_t seed; cutlass::DeviceAllocation<typename Gemm::ElementA> block_A; cutlass::DeviceAllocation<typename Gemm::ElementB> block_B; cutlass::DeviceAllocation<typename Gemm::ElementC> block_C; cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_D; cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_ref_D; #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) ///////////////////////////////////////////////////////////////////////////////////////////////// /// Testbed utility types ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; float alpha, beta; int iterations; int m, n, k; Options(): help(false), m(5120), n(4096), k(4096), alpha(1.f), beta(0.f), iterations(1000) { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("m", m); cmd.get_cmd_line_argument("n", n); cmd.get_cmd_line_argument("k", k); cmd.get_cmd_line_argument("alpha", alpha, 1.f); cmd.get_cmd_line_argument("beta", beta, 0.f); cmd.get_cmd_line_argument("iterations", iterations); } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "48_hopper_warp_specialized_gemm\n\n" << " Hopper FP32 GEMM using a Warp Specialized kernel.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement\n\n" << " --m=<int> Sets the M extent of the GEMM\n" << " --n=<int> Sets the N extent of the GEMM\n" << " --k=<int> Sets the K extent of the GEMM\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n"; out << "\n\nExamples:\n\n" << "$ " << "48_hopper_warp_specialized_gemm" << " --m=1024 --n=512 --k=1024 --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Two flops per multiply-add uint64_t flop = uint64_t(2) * m * n * k; double gflop = double(flop) / double(1.0e9); return gflop / runtime_s; } }; /// Result structure struct Result { double avg_runtime_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; Result( double avg_runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess) : avg_runtime_ms(avg_runtime_ms), gflops(gflops), status(status), error(error), passed(false) {} }; #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM setup and evaluation ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to initialize a block of device data template <class Element> bool initialize_block( cutlass::DeviceAllocation<Element>& block, uint64_t seed=2023) { Element scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else { scope_max = 8; scope_min = -8; } cutlass::reference::device::BlockFillRandomUniform( block.get(), block.size(), seed, scope_max, scope_min, 0); return true; } /// Initialize operands to be used in the GEMM and reference GEMM void initialize(const Options &options) { stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(options.m, options.k, Int<1>{})); stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(options.n, options.k, Int<1>{})); stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(options.m, options.n, Int<1>{})); stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(options.m, options.n, Int<1>{})); block_A.reset(options.m * options.k); block_B.reset(options.k * options.n); block_C.reset(options.m * options.n); block_D.reset(options.m * options.n); block_ref_D.reset(options.m * options.n); initialize_block(block_A, seed + 2023); initialize_block(block_B, seed + 2022); initialize_block(block_C, seed + 2021); } /// Populates a Gemm::Arguments structure from the given commandline options typename Gemm::Arguments args_from_options(const Options &options) { typename Gemm::Arguments arguments{ cutlass::gemm::GemmUniversalMode::kGemm, {options.m, options.n, options.k}, {block_A.get(), stride_A, block_B.get(), stride_B}, {{options.alpha, options.beta}, block_C.get(), stride_C, block_D.get(), stride_D} }; return arguments; } bool verify(const Options &options) { cutlass::TensorRef ref_A(block_A.get(), Gemm::LayoutA::packed({options.m, options.k})); cutlass::TensorRef ref_B(block_B.get(), Gemm::LayoutB::packed({options.k, 
options.n})); cutlass::TensorRef ref_C(block_C.get(), Gemm::LayoutC::packed({options.m, options.n})); cutlass::TensorRef ref_D(block_ref_D.get(), Gemm::LayoutD::packed({options.m, options.n})); // // Compute reference output // // Create instantiation for device reference gemm kernel DeviceGemmReference gemm_reference; // Launch device reference gemm kernel gemm_reference( {options.m, options.n, options.k}, ElementAccumulator(options.alpha), ref_A, ref_B, ElementAccumulator(options.beta), ref_C, ref_D); // Wait for kernel to finish CUDA_CHECK(cudaDeviceSynchronize()); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::device::BlockCompareEqual(block_ref_D.get(), block_D.get(), block_D.size()); return passed; } /// Execute a given example GEMM computation template <typename Gemm> int run(Options &options) { initialize(options); // Instantiate CUTLASS kernel depending on templates Gemm gemm; // Create a structure of gemm kernel arguments suitable for invoking an instance of Gemm auto arguments = args_from_options(options); // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check if the problem size is supported or not CUTLASS_CHECK(gemm.can_implement(arguments)); // Initialize CUTLASS kernel with arguments and workspace pointer CUTLASS_CHECK(gemm.initialize(arguments, workspace.get())); // Correctness / Warmup iteration CUTLASS_CHECK(gemm.run()); // Check if output from CUTLASS kernel and reference kernel are equal or not Result result; result.passed = verify(options); std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl; if (!result.passed) { exit(-1); } // Run profiling loop if (options.iterations > 0) { GpuTimer timer; timer.start(); for (int iter = 0; iter < options.iterations; ++iter) { CUTLASS_CHECK(gemm.initialize(arguments, workspace.get())); CUTLASS_CHECK(gemm.run()); } timer.stop(); // Compute average runtime and GFLOPs. float elapsed_ms = timer.elapsed_millis(); result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations); result.gflops = options.gflops(result.avg_runtime_ms / 1000.0); std::cout << " Problem Size: " << options.m << 'x' << options.n << 'x' << options.k << std::endl; std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl; std::cout << " GFLOPS: " << result.gflops << std::endl; } return 0; } #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // CUTLASS must be compiled with CUDA 12.0 Toolkit to run this example // and must have compute capability at least 90. if (__CUDACC_VER_MAJOR__ < 12) { std::cerr << "This example requires CUDA 12 or newer.\n"; // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } cudaDeviceProp props; int current_device_id; CUDA_CHECK(cudaGetDevice(&current_device_id)); CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id)); if (props.major < 9) { std::cerr << "This example requires a GPU of NVIDIA's Hopper Architecture or " << "later (compute capability 90 or greater).\n"; return 0; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } // // Evaluate CUTLASS kernels // #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) run<Gemm>(options); #endif return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
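// ---------------------------------------------------------------------------------------------
// Condensed sketch of the host-side launch sequence used by run<Gemm>() above. It assumes the
// Gemm adapter type, CUTLASS_CHECK, and the includes defined earlier in this example;
// `launch_once` is an illustrative helper name, not part of the example itself.
// ---------------------------------------------------------------------------------------------
template <typename GemmT>
void launch_once(typename GemmT::Arguments const &arguments) {
  GemmT gemm_op;

  // Query and allocate any extra device workspace the kernel requires.
  size_t workspace_size = GemmT::get_workspace_size(arguments);
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

  // Verify the problem is supported, bind arguments and workspace, then launch.
  CUTLASS_CHECK(gemm_op.can_implement(arguments));
  CUTLASS_CHECK(gemm_op.initialize(arguments, workspace.get()));
  CUTLASS_CHECK(gemm_op.run());
}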
cutlass/examples/48_hopper_warp_specialized_gemm/48_hopper_warp_specialized_gemm.cu/0
{ "file_path": "cutlass/examples/48_hopper_warp_specialized_gemm/48_hopper_warp_specialized_gemm.cu", "repo_id": "cutlass", "token_count": 6069 }
13
# Example 59: Ampere gather/scatter convolution CuTe and CUTLASS 3.x based Ampere convolution forward propagation kernel capable of operating on both affine and gather/scatter tensors. Example executions: ```sh ./59_ampere_gather_scatter_conv ./59_ampere_gather_scatter_conv --n=108 ./59_ampere_gather_scatter_conv --n=4096 --i=1 ./59_ampere_gather_scatter_conv --n=1080 --i=1000 ./59_ampere_gather_scatter_conv --n=131072 --i=1000 --no-check ``` This example demonstrates a few super cool features of CUTLASS and CuTe. It shows off 1. A dense conv 3D fprop kernel written as a single file ... 2. ... that leverages off-the-shelf CUTLASS collectives to show how custom kernels can use collectives ... 3. ... and uses the exact same templated kernel to also stamp out a gather/scatter 3D fprop conv ... 4. ... while getting near peak performance of the Ampere class tensor core on Ampere and Ada GPUs ... 5. ... by using static cute shapes and strides in case problem shapes are known at compile time. ## A dense conv 3D fprop kernel written in CUTLASS 3.x and CuTe The most common strategy for implementing high performance convolution kernels on the GPU is to transform the activation tensor in such a way that we can perform the computation as a GEMM. This is called the image to column (im2col) transformation. [CUTLASS 2.x implementation of im2col based convolutions is documented separately](../../media/docs/implicit_gemm_convolution.md), and here we consider a fresh approach for CuTe. A 3D convolution has the following input tensors: - Activation tensor (Act): `((N,(D,H,W)), (C,(1,1,1)))` - Filter tensor (Flt): `( K, (C,(T,R,S)))` - Output tensor (Out): `((N,(Z,P,Q)), K )` Where - N := number of images - DHW := spatial dimensions of the activation tensor - C := channel dimension of the activation tensor - K := channel dimension of the filter and output tensor - TRS := spoke dimensions of the filter tensor - ZPQ := spatial dimensions of the output tensor As is evident in the tensor shapes, these cannot be issued to a GEMM just yet, since there is no logical M, N, and K modes we can group the tensor modes into. Notice that every spoke of the filter tensor (TRS) will be applied to some (offset) view of the activation tensor, thus expanding the logical size of the activation tensor. Additionally, a similar logical transform of the spatial dimensions can be encoded as a function of the padding, dilations, traversal strides, and filter spokes. This gets us to our im2col transform: im2col transform affects the component shapes/strides of the activation tensor in the following way: - ZPQ Shape : changes DHW domain with formula `(1 + (DHW + pad - (((TRS-1) * dilation) + 1)) / traversal_stride)` - TRS Shape : TRS domain instead of `(1,1,1)` - ZPQ Strides : Original DHW strides get `elem_scale()`-ed by traversal strides DHW - TRS Strides : Original DHW strides get `elem_scale()`-ed by dilation DHW With this transform applied, we end up with a set of input and output tensors that are logically consistent in their MNK dimensions, thus allowing us to dispatch to a GEMM. im2col activation layout: ((N,(Z,P,Q)), (C,(T,R,S))) // logical (M,K) filter layout : ( K, (C,(T,R,S))) // logical (N,K) output layout : ((N,(Z,P,Q)), K ) // logical (M,N) CuTe's layout representation and algebra make these folded tensors easy to represent and manipulate. 
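To make the ZPQ shape formula above concrete, here is a small scalar sketch of the per-dimension output extent computation. Treating `pad` as the total padding added along the dimension is an assumption of this sketch; the example source defines the exact convention.

```cpp
// Per-dimension output extent, following the formula quoted above.
int output_extent(int DHW, int TRS, int pad, int dilation, int traversal_stride) {
  int effective_filter = (TRS - 1) * dilation + 1;
  return 1 + (DHW + pad - effective_filter) / traversal_stride;
}

// e.g. a 16-wide input with a 3-wide filter, no padding, unit dilation and stride:
// output_extent(16, 3, /*pad=*/0, /*dilation=*/1, /*traversal_stride=*/1) == 14
```

Because the transformed shapes and strides are ordinary CuTe layouts, this bookkeeping stays easy to express and manipulate.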
This is most evident in the reference check code used in this example: ```cpp for (size_t logical_m = 0; logical_m < size<0>(mOutputRef); ++logical_m) { for (size_t logical_n = 0; logical_n < size<1>(mOutputRef); ++logical_n) { auto accumulator = float(0); for (size_t logical_k = 0; logical_k < size<1>(mStencil); ++logical_k) { accumulator += mStencil(logical_m, logical_k) * mActivation(logical_n, logical_k); } mOutputRef(logical_m, logical_n) = accumulator; } } ``` Which succinctly demonstrates how im2col transform allows us to implement convolutions as GEMMs with special layout transformations on the input tensor. Note: in the example kernel's implementation we treat activations as the B tensor and filter as the A tensor, thus making their logical dimensions NK and MK respectively. ## Leveraging CUTLASS collectives off the shelf in a custom kernel Now that we have transformed our problem in such a way that allows us to dispatch to a GEMM, we can reuse much of the machinery CUTLASS offers to implement this forward pass convolution operator. CUTLASS decomposes these "moving parts" of GPU linear algebra into reusable, modular software components abstracted by C++ template classes. This example demonstrates how some of the lower layers of the hierarchy can be re-used for custom kernels by writing a custom kernel for convolution that re-uses the Ampere/Ada GEMM collectives from CUTLASS 3. A kernel author is free to compose their custom components with any of the existing templates in the CUTLASS hierarchy to leverage existing high performance implementations from the CUTLASS team. In this example, we write a custom kernel layer and compose with an existing collective. However, any of the CUTLASS kernels can be composed with bespoke collectives if the desired customization is a mainloop or epilogue fusion without changes to the grid planning, tile scheduling, load balancing, or thread marshalling. ## Implementing gather/scatter and dense convolution with the same kernel Functionality and correctness of the implemented kernel, as a virtue of using CuTe and off the shelf CUTLASS collectives, only relies on the logical consistency of the layouts of input and output tensors. This means that we can freely change how the logical coordinates of the tensors map into the index space, and even how these dereferences happen. [CUTLASS example 52](../52_hopper_gather_scatter_fusion/) demonstrates this by implementing a custom stride that supports indexed indirection for tensor data accesses. This allows for example 52 to implement a GEMM where inputs are gathered and output is scattered based on an index buffer. We re-use the same custom stride utilities in this example to implement a convolution kernel that gathers along the NDHW dimensions of the activation tensor and scatters the output along the NZPQ dimensions of the output tensor, treating the channel dimensions as the dense vectors. 
Our dense affine im2col transformed activation tensor: ```cpp // im2col transformed activation layout: ((nzpq), (ctrs)) => idx auto xformed_act_layout = make_layout( make_shape (make_shape ( N, Z, P, Q), make_shape ( C, T, R, S)), make_stride(make_stride(D*H*W*C, H*W*C, W*C, C), make_stride(_1{}, H*W*C, W*C, C))); ``` now becomes a composed layout that uses `IndexedGather`: ```cpp // Inner layout of the composition: // ((nzpq), (csrt)) => (idx_buffer_idx, dense_offset) auto EG = E<0>{}; // Gather basis (1,0) (idx_buffer_idx) auto EC = E<1>{}; // Contiguous basis (0,1) (dense_offset) auto xformed_act_logical_inner = make_layout( make_shape (make_shape ( N, Z, P, Q), make_shape ( C, T, R, S)), make_stride(make_stride(D*H*W*EG, H*W*EG, W*EG, EG), make_stride(EC, H*W*EG, W*EG, EG))); // Outer layout of the composition: // (idx_buffer_idx, dense_offset) => idx // IndexedGather obtains idx by applying (gmem_base_ptr + gather_idx_buf[idx_buffer_idx] + dense_offset) auto xformed_act_gather_outer = make_layout( make_shape(_1{},_1{}), make_stride(CustomStride{IndexedGather{gather_idx_buf}, C}, _1{})); // Compose the inner and outer layouts // ((nzpq), (ctrs)) => idx auto xformed_act_composed_layout = composition( xformed_act_gather_outer, make_arithmetic_tuple(_0{}, _0{}), xformed_act_logical_inner); ``` Here, we create a composed layout whose inner layout has the same logical MK shape as earlier, but with an outer layout that uses the custom strides with an index buffer to access memory with indirections. A custom stride requires two inputs to compute the index that a certain coordinate maps to: the index buffer offset and the dense offset into the vector. This entails that our inner layout (the one with the logical MK shape) has a rank-2 codomain `(idx_buffer_idx, dense_offset)`. We can set up such a layout with scaled basis strides, which allow us to map a domain onto a codomain with multiple orthogonal bases. The two codomain basis are the index buffer offsets (rank 0 basis), and the dense vector offsets (rank 1 basis). A similar composed layout is set up for the output scatter tensor. This tensor still has a logical MK shape and is backed by a CuTe layout, which means we can still tile, partition, and otherwise manipulate it with CuTe's layout algebra in the same way we would any other tensor. Substituting the activation tensor's affine layout for this gather layout requires no changes to the implementation of the kernel whatsoever. Everything composes. This example stamps out a dense 3D convolution as well as gather/scatter 3D convolution using the same kernel template, with the only difference between them being the layouts of the input and output tensors. Convolutions are just a special case of tensor contractions, and as [example 51](../51_hopper_gett) demonstrates, the exact same collective used in this example can also be used to implement arbitrary GETTs. Of course, this also means that the same kernel can implement gather/scatter GETTs as well! This demonstrates the composition power of not just CuTe, but also CUTLASS 3's two level micro kernel abstraction. A single highly tuned temporal micro-kernel (collective) can be implemented once and applied to compute dense GETTs, gather/scatter GETTs, dense convolutions, and gather/scatter convolutions. 
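Read as a plain address computation, the composed gather layout above boils down to one indirection plus a dense offset. The following scalar model is only an illustration of that dereference (the function and parameter names are hypothetical, not the actual CuTe types); scaling the looked-up row index by `C` is what `CustomStride{IndexedGather{gather_idx_buf}, C}` encodes:

```cpp
// Scalar model of one gathered element access:
//   inner layout: (nzpq, ctrs) coordinate -> (idx_buffer_idx, dense_offset)
//   outer layout: (idx_buffer_idx, dense_offset) -> memory index
float gather_load(float const* gmem_base_ptr,
                  int const*   gather_idx_buf,
                  int          idx_buffer_idx,   // which NDHW row to fetch
                  int          dense_offset,     // offset within that row's C-vector
                  int          C) {
  return gmem_base_ptr[gather_idx_buf[idx_buffer_idx] * C + dense_offset];
}
```

The scatter applied to the output tensor is the mirror image of this access: the same indirection selects the destination NZPQ row before the dense channel offset is applied.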
## Peak performance on Ampere and Ada GPUs by leveraging domain specific knowledge Often, when implementing custom kernels, a user has more knowledge of the problem domain that can be exploited to deliver higher performance than otherwise could be through general kernels. In this example we presume that the shape of each of the images (DHWC dimensions) as well as the filter (TRS) are available a-priori and that the tile shape evenly divides the problem. Number of images (N) is still left as a runtime parameter. Knowing the extents of our tensors at compile time allows us to encode them as static cute shapes rather than a dynamic problem shape, resulting in the elimination of most of the index computation instructions such as expensive div/mods. Knowing that the problem shape is divisible by the tile shape allows us to use the Ampere collective that does not perform predication on global memory loads, further reducing overheads and allowing us to achieve near peak performance on RTX Ampere and Ada GPUs. Running this example on an RTX 3080Ti prints the following performance numbers (some output culled for brevity): ``` $> ./examples/59_ampere_gather_scatter_conv/59_ampere_gather_scatter_conv --n=131072 --i=128 --no-check Ampere convolution forward propogation kernel supporting both affine and gather/scatter tensors. Allocating tensors ... done. Initializing data ... done. Initializing gather/scatter index buffers ... done. Running dense fprop kernel Conv TFLOP count = 0.927713 Conv dense perf: 31.027376ms | TFLOP/s = 29.899819 Running gather/scatter fprop kernel Conv TFLOP count = 0.927713 Conv gather/scatter perf: 28.973721ms | TFLOP/s = 32.019117 ``` With this in mind, this example kernel has the following limitations: - This example kernel only supports dynamic image count, all other conv problem shape must be defined as `cute::Constant<>`s - Problem shapes (including dynamic image count `N`) must be evenly divisible by the tile shape - It does not perform fp32->tf32 numeric conversion, gmem inputs must be rounded to tf32 already
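To illustrate the static-shape point above: the difference between a dynamic and a static extent in CuTe is simply where the value lives. A minimal sketch (illustrative only, mirroring the `cute::Constant<>` requirement in the limitations list):

```cpp
#include <cute/tensor.hpp>
using namespace cute;

void shape_comparison() {
  // Dynamic extents: values are runtime integers, so index math needs runtime div/mod.
  auto dyn_shape = make_shape(128, 128, 32);

  // Static extents: values live in the type, so the same arithmetic folds away at compile time.
  auto sta_shape = make_shape(_128{}, _128{}, _32{});

  static_assert( is_static<decltype(sta_shape)>::value, "known at compile time");
  static_assert(!is_static<decltype(dyn_shape)>::value, "carried at runtime");
  (void)dyn_shape; (void)sta_shape;
}
```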
cutlass/examples/59_ampere_gather_scatter_conv/README.md/0
{ "file_path": "cutlass/examples/59_ampere_gather_scatter_conv/README.md", "repo_id": "cutlass", "token_count": 3469 }
14
<jupyter_start><jupyter_text>Example of using elementwise activation functions in the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs with different epilogues.[](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/01_epilogue.ipynb) Prerequisites for running on ColabThis notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass<jupyter_output><empty_output><jupyter_text>General setupWe first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import numpy as np import cutlass # This controls whether ther C++ GEMM declaration will be printed at each step. Set to `false` to # omit this information. print_module = True m = 256 n = m k = m type_A = np.float16 type_B = np.float16 type_C = np.float16 type_D = np.float16 np.random.seed(1234) scope_min = -4 scope_max = 4 tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A)) tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B)) tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C)) alpha = np.float16(1.) beta = np.float16(0.) tensor_D = np.zeros(tensor_C.shape).astype(type_D)<jupyter_output><empty_output><jupyter_text>Run a GEMM with an identity activation functionTo begin, we simply run a default GEMM with an identity activation function. This performs the well-known operation `D = alpha * (A @ B) + beta * C`. This is the default activation function used, and does not need to be specified.<jupyter_code>plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor) plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output><empty_output><jupyter_text>Run a GEMM with a ReLU element-wise activation functionCUTLASS makes it easy to support other element-wise activation functions. This results in performing an element-wise after the generic linear combination performed in a GEMM. If we call such an activation function `act`, the resulting formulation is:```D = alpha * (A @ B) + beta * CD = act(D)```Here, we will add a ReLU activation function. Given an input `x`, ReLU returns `max(x, 0)`.This is easy to do in CUTLASS. One only needs to set the plan's `activation` field.<jupyter_code>tensor_D_relu = np.zeros(tensor_C.shape).astype(type_D) plan.activation = "relu" plan.run(tensor_A, tensor_B, tensor_C, tensor_D_relu, print_module=print_module)<jupyter_output><empty_output><jupyter_text>We can now verify that the result of the GEMM that used a ReLU activation function:<jupyter_code>relu_ref = (tensor_D >= 0).astype(type_D) * tensor_D np.testing.assert_array_equal(relu_ref, tensor_D_relu)<jupyter_output><empty_output><jupyter_text>Other element-wise activation functionsCUTLASS supports a variety of widely-used element-wise activation functions. 
We can obtain a list of these functions via the `get_activations()` method.<jupyter_code>activations = plan.activations() for activation in activations: print(activation)<jupyter_output><empty_output><jupyter_text>We can then run each of them:<jupyter_code>for activation in activations: print('=============================================================================================') print(f'Compiling and running activation {activation}') print('=============================================================================================') plan.activation = activation plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output><empty_output><jupyter_text>To add an activation with parameter such as `leaky_relu`, a tuple should be provided containing the activation function name and the (or a list of) parameter.<jupyter_code>negative_slope = 0.5 plan.activation = ("leaky_relu", negative_slope) plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output><empty_output>
cutlass/examples/python/01_epilogue.ipynb/0
{ "file_path": "cutlass/examples/python/01_epilogue.ipynb", "repo_id": "cutlass", "token_count": 1412 }
15
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/util/type_traits.hpp> #include <cute/container/tuple.hpp> #include <cute/algorithm/functional.hpp> #include <cute/numeric/integer_sequence.hpp> #include <cute/numeric/integral_constant.hpp> /// @file tuple_algorithms.hpp /// @brief Common algorithms on (hierarchical) tuples /// /// Code guidelines and style preferences: /// /// For perfect forwarding, don't use std::forward, because it may not /// be defined in device code when compiling with NVRTC. Instead, use /// `static_cast<ParameterType&&>(parameter_name)`. /// /// CuTe generally does not bother forwarding functions, as /// reference-qualified member functions are rare in this code base. /// /// Throughout CUTLASS, cute::make_tuple always needs to be called /// namespace-qualified, EVEN If inside the cute namespace and/or in /// scope of a "using namespace cute" declaration. Otherwise, the /// compiler may select std::make_tuple instead of cute::make_tuple, /// due to argument-dependent lookup. Two problems may result from /// that. /// /// 1. Functions have an unexpected return type (std::tuple instead of /// cute::tuple), so functions that take cute::tuple parameters /// fail to compile (generally inside functions that have template /// parameters expected to be cute::tuple). /// /// 2. std::tuple does not have the required __host__ __device__ /// markings, so the CUDA compiler complains if you use it in /// device code. /// /// cute::make_tuple will occur more often than std::make_tuple would /// in modern C++ code, because cute::tuple's design deprioritizes /// correct operation of CTAD (constructor template argument /// deduction) in favor of implementation simplicity. 
namespace cute { // // Apply (Unpack) // (t, f) => f(t_0,t_1,...,t_n) // namespace detail { template <class T, class F, int... I> CUTE_HOST_DEVICE constexpr auto apply(T&& t, F&& f, seq<I...>) { return f(get<I>(static_cast<T&&>(t))...); } } // end namespace detail template <class T, class F> CUTE_HOST_DEVICE constexpr auto apply(T&& t, F&& f) { return detail::apply(static_cast<T&&>(t), f, tuple_seq<T>{}); } // // Transform Apply // (t, f, g) => g(f(t_0),f(t_1),...) // namespace detail { template <class T, class F, class G, int... I> CUTE_HOST_DEVICE constexpr auto tapply(T&& t, F&& f, G&& g, seq<I...>) { return g(f(get<I>(static_cast<T&&>(t)))...); } template <class T0, class T1, class F, class G, int... I> CUTE_HOST_DEVICE constexpr auto tapply(T0&& t0, T1&& t1, F&& f, G&& g, seq<I...>) { return g(f(get<I>(static_cast<T0&&>(t0)), get<I>(static_cast<T1&&>(t1)))...); } template <class T0, class T1, class T2, class F, class G, int... I> CUTE_HOST_DEVICE constexpr auto tapply(T0&& t0, T1&& t1, T2&& t2, F&& f, G&& g, seq<I...>) { return g(f(get<I>(static_cast<T0&&>(t0)), get<I>(static_cast<T1&&>(t1)), get<I>(static_cast<T2&&>(t2)))...); } } // end namespace detail template <class T, class F, class G> CUTE_HOST_DEVICE constexpr auto transform_apply(T&& t, F&& f, G&& g) { if constexpr (is_tuple<remove_cvref_t<T>>::value) { return detail::tapply(static_cast<T&&>(t), f, g, tuple_seq<T>{}); } else { return g(f(static_cast<T&&>(t))); } } template <class T0, class T1, class F, class G> CUTE_HOST_DEVICE constexpr auto transform_apply(T0&& t0, T1&& t1, F&& f, G&& g) { if constexpr (is_tuple<remove_cvref_t<T0>>::value) { return detail::tapply(static_cast<T0&&>(t0), static_cast<T1&&>(t1), f, g, tuple_seq<T0>{}); } else { return g(f(static_cast<T0&&>(t0), static_cast<T1&&>(t1))); } } template <class T0, class T1, class T2, class F, class G> CUTE_HOST_DEVICE constexpr auto transform_apply(T0&& t0, T1&& t1, T2&& t2, F&& f, G&& g) { if constexpr (is_tuple<remove_cvref_t<T0>>::value) { return detail::tapply(static_cast<T0&&>(t0), static_cast<T1&&>(t1), static_cast<T2&&>(t2), f, g, tuple_seq<T0>{}); } else { return g(f(static_cast<T0&&>(t0), static_cast<T1&&>(t1), static_cast<T2&&>(t2))); } } // // For Each // (t, f) => f(t_0),f(t_1),...,f(t_n) // template <class T, class F> CUTE_HOST_DEVICE constexpr void for_each(T&& t, F&& f) { if constexpr (is_tuple<remove_cvref_t<T>>::value) { return detail::apply(t, [&](auto&&... a) { (f(static_cast<decltype(a)&&>(a)), ...); }, tuple_seq<T>{}); } else { return f(static_cast<T&&>(t)); } CUTE_GCC_UNREACHABLE; } template <class T, class F> CUTE_HOST_DEVICE constexpr auto for_each_leaf(T&& t, F&& f) { if constexpr (is_tuple<remove_cvref_t<T>>::value) { return detail::apply(static_cast<T&&>(t), [&](auto&&... a){ return (for_each_leaf(static_cast<decltype(a)&&>(a), f), ...); }, tuple_seq<T>{}); } else { return f(static_cast<T&&>(t)); } CUTE_GCC_UNREACHABLE; } // // Transform // (t, f) => (f(t_0),f(t_1),...,f(t_n)) // template <class T, class F> CUTE_HOST_DEVICE constexpr auto transform(T const& t, F&& f) { if constexpr (is_tuple<T>::value) { return detail::tapply(t, f, [](auto const&... a){ return cute::make_tuple(a...); }, tuple_seq<T>{}); } else { return f(t); } CUTE_GCC_UNREACHABLE; } template <class T0, class T1, class F> CUTE_HOST_DEVICE constexpr auto transform(T0 const& t0, T1 const& t1, F&& f) { if constexpr (is_tuple<T0>::value) { static_assert(tuple_size<T0>::value == tuple_size<T1>::value, "Mismatched tuple_size"); return detail::tapply(t0, t1, f, [](auto const&... 
a){ return cute::make_tuple(a...); }, tuple_seq<T0>{}); } else { return f(t0, t1); } CUTE_GCC_UNREACHABLE; } template <class T0, class T1, class T2, class F> CUTE_HOST_DEVICE constexpr auto transform(T0 const& t0, T1 const& t1, T2 const& t2, F&& f) { if constexpr (is_tuple<T0>::value) { static_assert(tuple_size<T0>::value == tuple_size<T1>::value, "Mismatched tuple_size"); static_assert(tuple_size<T0>::value == tuple_size<T2>::value, "Mismatched tuple_size"); return detail::tapply(t0, t1, t2, f, [](auto const&... a){ return cute::make_tuple(a...); }, tuple_seq<T0>{}); } else { return f(t0, t1, t2); } CUTE_GCC_UNREACHABLE; } template <class T, class F> CUTE_HOST_DEVICE constexpr auto transform_leaf(T const& t, F&& f) { if constexpr (is_tuple<T>::value) { return transform(t, [&](auto const& a) { return transform_leaf(a, f); }); } else { return f(t); } CUTE_GCC_UNREACHABLE; } template <class T0, class T1, class F> CUTE_HOST_DEVICE constexpr auto transform_leaf(T0 const& t0, T1 const& t1, F&& f) { if constexpr (is_tuple<T0>::value) { return transform(t0, t1, [&](auto const& a, auto const& b) { return transform_leaf(a, b, f); }); } else { return f(t0, t1); } CUTE_GCC_UNREACHABLE; } // // find and find_if // namespace detail { template <class T, class F, int I, int... Is> CUTE_HOST_DEVICE constexpr auto find_if(T const& t, F&& f, seq<I,Is...>) { if constexpr (decltype(f(get<I>(t)))::value) { return cute::C<I>{}; } else if constexpr (sizeof...(Is) == 0) { return cute::C<I+1>{}; } else { return find_if(t, f, seq<Is...>{}); } CUTE_GCC_UNREACHABLE; } } // end namespace detail template <class T, class F> CUTE_HOST_DEVICE constexpr auto find_if(T const& t, F&& f) { if constexpr (is_tuple<T>::value) { return detail::find_if(t, f, tuple_seq<T>{}); } else { return cute::C<decltype(f(t))::value ? 0 : 1>{}; } CUTE_GCC_UNREACHABLE; } template <class T, class X> CUTE_HOST_DEVICE constexpr auto find(T const& t, X const& x) { return find_if(t, [&](auto const& v) { return v == x; }); // This should always return a static true/false } template <class T, class F> CUTE_HOST_DEVICE constexpr auto any_of(T const& t, F&& f) { if constexpr (is_tuple<T>::value) { return detail::apply(cute::transform(t, f), [&] (auto const&... a) { return (false_type{} || ... || a); }, tuple_seq<T>{}); } else { return f(t); } CUTE_GCC_UNREACHABLE; } template <class T, class F> CUTE_HOST_DEVICE constexpr auto all_of(T const& t, F&& f) { if constexpr (is_tuple<T>::value) { return detail::apply(t, [&] (auto const&... a) { return (true_type{} && ... && f(a)); }, tuple_seq<T>{}); } else { return f(t); } CUTE_GCC_UNREACHABLE; } template <class T, class F> CUTE_HOST_DEVICE constexpr auto none_of(T const& t, F&& f) { return not any_of(t, f); } // // Filter // (t, f) => <f(t_0),f(t_1),...,f(t_n)> // template <class T, class F> CUTE_HOST_DEVICE constexpr auto filter_tuple(T const& t, F&& f) { return transform_apply(t, f, [](auto const&... a) { return cute::tuple_cat(a...); }); } template <class T0, class T1, class F> CUTE_HOST_DEVICE constexpr auto filter_tuple(T0 const& t0, T1 const& t1, F&& f) { return transform_apply(t0, t1, f, [](auto const&... a) { return cute::tuple_cat(a...); }); } template <class T0, class T1, class T2, class F> CUTE_HOST_DEVICE constexpr auto filter_tuple(T0 const& t0, T1 const& t1, T2 const& t2, F&& f) { return transform_apply(t0, t1, t2, f, [](auto const&... 
a) { return cute::tuple_cat(a...); }); } // // Fold (Reduce, Accumulate) // (t, v, f) => f(...f(f(v,t_0),t_1),...,t_n) // namespace detail { // This impl compiles much faster than cute::apply and variadic args template <class T, class V, class F> CUTE_HOST_DEVICE constexpr decltype(auto) fold(T&& t, V&& v, F&& f, seq<>) { return static_cast<V&&>(v); } template <class T, class V, class F, int I, int... Is> CUTE_HOST_DEVICE constexpr decltype(auto) fold(T&& t, V&& v, F&& f, seq<I,Is...>) { if constexpr (sizeof...(Is) == 0) { return f(static_cast<V&&>(v), get<I>(static_cast<T&&>(t))); } else { return fold(static_cast<T&&>(t), f(static_cast<V&&>(v), get<I>(static_cast<T&&>(t))), f, seq<Is...>{}); } CUTE_GCC_UNREACHABLE; } } // end namespace detail template <class T, class V, class F> CUTE_HOST_DEVICE constexpr auto fold(T&& t, V&& v, F&& f) { if constexpr (is_tuple<remove_cvref_t<T>>::value) { return detail::fold(static_cast<T&&>(t), static_cast<V&&>(v), f, tuple_seq<T>{}); } else { return f(static_cast<V&&>(v), static_cast<T&&>(t)); } CUTE_GCC_UNREACHABLE; } template <class T, class F> CUTE_HOST_DEVICE constexpr decltype(auto) fold_first(T&& t, F&& f) { if constexpr (is_tuple<remove_cvref_t<T>>::value) { return detail::fold(static_cast<T&&>(t), get<0>(static_cast<T&&>(t)), f, make_range<1,tuple_size<remove_cvref_t<T>>::value>{}); } else { return static_cast<T&&>(t); } CUTE_GCC_UNREACHABLE; } // // front, back, take, select, unwrap // // Get the first non-tuple element in a hierarchical tuple template <class T> CUTE_HOST_DEVICE constexpr decltype(auto) front(T&& t) { if constexpr (is_tuple<remove_cvref_t<T>>::value) { return front(get<0>(static_cast<T&&>(t))); } else { return static_cast<T&&>(t); } CUTE_GCC_UNREACHABLE; } // Get the last non-tuple element in a hierarchical tuple template <class T> CUTE_HOST_DEVICE constexpr decltype(auto) back(T&& t) { if constexpr (is_tuple<remove_cvref_t<T>>::value) { constexpr int N = tuple_size<remove_cvref_t<T>>::value; // MSVC needs a bit of extra help here deducing return types. // We help it by peeling off the nonrecursive case a level "early." if constexpr (! is_tuple<remove_cvref_t<decltype(get<N - 1>(static_cast<T&&>(t)))>>::value) { return get<N - 1>(static_cast<T&&>(t)); } else { return back(get<N - 1>(static_cast<T&&>(t))); } } else { return static_cast<T&&>(t); } CUTE_GCC_UNREACHABLE; } // Takes the elements in the range [B,E) template <int B, int E, class T> CUTE_HOST_DEVICE constexpr auto take(T const& t) { return detail::apply(t, [](auto const&... a) { return cute::make_tuple(a...); }, make_range<B,E>{}); } // // Select tuple elements with given indices. // template <int... 
I, class T> CUTE_HOST_DEVICE constexpr auto select(T const& t) { return cute::make_tuple(get<I>(t)...); } template <class T, class Indices> CUTE_HOST_DEVICE constexpr auto select(T const& t, Indices const& indices) { if constexpr (is_tuple<Indices>::value) { return cute::transform(indices, [&t](auto i) { return select(t, i); }); } else { static_assert(is_static<Indices>::value, "Order must be static"); return get<Indices::value>(t); } } // Wrap non-tuples into rank-1 tuples or forward template <class T> CUTE_HOST_DEVICE constexpr auto wrap(T const& t) { if constexpr (is_tuple<T>::value) { return t; } else { return cute::make_tuple(t); } CUTE_GCC_UNREACHABLE; } // Unwrap rank-1 tuples until we're left with a rank>1 tuple or a non-tuple template <class T> CUTE_HOST_DEVICE constexpr auto unwrap(T const& t) { if constexpr (is_tuple<T>::value) { if constexpr (tuple_size<T>::value == 1) { return unwrap(get<0>(t)); } else { return t; } } else { return t; } CUTE_GCC_UNREACHABLE; } // // Flatten and Unflatten // template <class T> struct is_flat : true_type {}; template <class... Ts> struct is_flat<tuple<Ts...>> : bool_constant<(true && ... && (not is_tuple<Ts>::value))> {}; // Flatten a hierarchical tuple to a tuple of depth one // and wrap non-tuples into a rank-1 tuple. template <class T> CUTE_HOST_DEVICE constexpr auto flatten_to_tuple(T const& t) { if constexpr (is_tuple<T>::value) { if constexpr (is_flat<T>::value) { // Shortcut for perf return t; } else { return filter_tuple(t, [](auto const& a) { return flatten_to_tuple(a); }); } } else { return cute::make_tuple(t); } CUTE_GCC_UNREACHABLE; } // Flatten a hierarchical tuple to a tuple of depth one // and leave non-tuple untouched. template <class T> CUTE_HOST_DEVICE constexpr auto flatten(T const& t) { if constexpr (is_tuple<T>::value) { if constexpr (is_flat<T>::value) { // Shortcut for perf return t; } else { return filter_tuple(t, [](auto const& a) { return flatten_to_tuple(a); }); } } else { return t; } CUTE_GCC_UNREACHABLE; } namespace detail { template <class FlatTuple, class TargetProfile> CUTE_HOST_DEVICE constexpr auto unflatten_impl(FlatTuple const& flat_tuple, TargetProfile const& target_profile) { if constexpr (is_tuple<TargetProfile>::value) { return fold(target_profile, cute::make_tuple(cute::make_tuple(), flat_tuple), [](auto const& v, auto const& t) { auto [result, remaining_tuple] = v; auto [sub_result, sub_tuple] = unflatten_impl(remaining_tuple, t); return cute::make_tuple(append(result, sub_result), sub_tuple); }); } else { return cute::make_tuple(get<0>(flat_tuple), take<1, decltype(rank(flat_tuple))::value>(flat_tuple)); } CUTE_GCC_UNREACHABLE; } } // end namespace detail // Unflatten a flat tuple into a hierarchical tuple // @pre flatten(@a flat_tuple) == @a flat_tuple // @pre rank(flatten(@a target_profile)) == rank(@a flat_tuple) // @post congruent(@a result, @a target_profile) // @post flatten(@a result) == @a flat_tuple template <class FlatTuple, class TargetProfile> CUTE_HOST_DEVICE constexpr auto unflatten(FlatTuple const& flat_tuple, TargetProfile const& target_profile) { auto [unflatten_tuple, flat_remainder] = detail::unflatten_impl(flat_tuple, target_profile); CUTE_STATIC_ASSERT_V(rank(flat_remainder) == Int<0>{}); return unflatten_tuple; } // // insert and remove and replace // namespace detail { // Shortcut around cute::tuple_cat for common insert/remove/repeat cases template <class T, class X, int... I, int... J, int... 
K> CUTE_HOST_DEVICE constexpr auto construct(T const& t, X const& x, seq<I...>, seq<J...>, seq<K...>) { return cute::make_tuple(get<I>(t)..., (void(J),x)..., get<K>(t)...); } } // end namespace detail // Insert x into the Nth position of the tuple template <int N, class T, class X> CUTE_HOST_DEVICE constexpr auto insert(T const& t, X const& x) { return detail::construct(t, x, make_seq<N>{}, seq<0>{}, make_range<N,tuple_size<T>::value>{}); } // Remove the Nth element of the tuple template <int N, class T> CUTE_HOST_DEVICE constexpr auto remove(T const& t) { return detail::construct(t, 0, make_seq<N>{}, seq<>{}, make_range<N+1,tuple_size<T>::value>{}); } // Replace the Nth element of the tuple with x template <int N, class T, class X> CUTE_HOST_DEVICE constexpr auto replace(T const& t, X const& x) { return detail::construct(t, x, make_seq<N>{}, seq<0>{}, make_range<N+1,tuple_size<T>::value>{}); } // Replace the first element of the tuple with x template <class T, class X> CUTE_HOST_DEVICE constexpr auto replace_front(T const& t, X const& x) { if constexpr (is_tuple<T>::value) { return detail::construct(t, x, seq<>{}, seq<0>{}, make_range<1,tuple_size<T>::value>{}); } else { return x; } CUTE_GCC_UNREACHABLE; } // Replace the last element of the tuple with x template <class T, class X> CUTE_HOST_DEVICE constexpr auto replace_back(T const& t, X const& x) { if constexpr (is_tuple<T>::value) { return detail::construct(t, x, make_seq<tuple_size<T>::value-1>{}, seq<0>{}, seq<>{}); } else { return x; } CUTE_GCC_UNREACHABLE; } // // Make a tuple of Xs of tuple_size N // template <int N, class X> CUTE_HOST_DEVICE constexpr auto tuple_repeat(X const& x) { return detail::construct(0, x, seq<>{}, make_seq<N>{}, seq<>{}); } // // Make repeated Xs of rank N // template <int N, class X> CUTE_HOST_DEVICE constexpr auto repeat(X const& x) { if constexpr (N == 1) { return x; } else { return detail::construct(0, x, seq<>{}, make_seq<N>{}, seq<>{}); } CUTE_GCC_UNREACHABLE; } // // Make a tuple of Xs the same profile as tuple T // template <class T, class X> CUTE_HOST_DEVICE constexpr auto repeat_like(T const& t, X const& x) { if constexpr (is_tuple<T>::value) { return transform(t, [&](auto const& a) { return repeat_like(a,x); }); } else { return x; } CUTE_GCC_UNREACHABLE; } // Group the elements [B,E) of a T into a single element // e.g. 
group<2,4>(T<_1,_2,_3,_4,_5,_6>{}) // => T<_1,_2,T<_3,_4>,_5,_6>{} template <int B, int E, class T> CUTE_HOST_DEVICE constexpr auto group(T const& t) { if constexpr (not is_tuple<T>::value) { if constexpr (E == -1) { return group<B,1>(t); } else { return detail::construct(t, take<B,E>(t), make_seq<B>{}, make_seq<(B < E)>{}, make_range<E,1>{}); } } else if constexpr (E == -1) { return group<B,tuple_size<T>::value>(t); } else if constexpr (B <= E) { return detail::construct(t, take<B,E>(t), make_seq<B>{}, make_seq<(B < E)>{}, make_range<E,tuple_size<T>::value>{}); } else { static_assert(B <= E); } CUTE_GCC_UNREACHABLE; } // // Extend a T to rank N by appending/prepending an element // template <int N, class T, class X> CUTE_HOST_DEVICE constexpr auto append(T const& a, X const& x) { if constexpr (is_tuple<T>::value) { if constexpr (N == tuple_size<T>::value) { return a; } else { static_assert(N > tuple_size<T>::value); return detail::construct(a, x, make_seq<tuple_size<T>::value>{}, make_seq<N-tuple_size<T>::value>{}, seq<>{}); } } else { if constexpr (N == 1) { return a; } else { return detail::construct(cute::make_tuple(a), x, seq<0>{}, make_seq<N-1>{}, seq<>{}); } } CUTE_GCC_UNREACHABLE; } template <class T, class X> CUTE_HOST_DEVICE constexpr auto append(T const& a, X const& x) { if constexpr (is_tuple<T>::value) { return detail::construct(a, x, make_seq<tuple_size<T>::value>{}, seq<0>{}, seq<>{}); } else { return cute::make_tuple(a, x); } CUTE_GCC_UNREACHABLE; } template <int N, class T, class X> CUTE_HOST_DEVICE constexpr auto prepend(T const& a, X const& x) { if constexpr (is_tuple<T>::value) { if constexpr (N == tuple_size<T>::value) { return a; } else { static_assert(N > tuple_size<T>::value); return detail::construct(a, x, seq<>{}, make_seq<N-tuple_size<T>::value>{}, make_seq<tuple_size<T>::value>{}); } } else { if constexpr (N == 1) { return a; } else { static_assert(N > 1); return detail::construct(cute::make_tuple(a), x, seq<>{}, make_seq<N-1>{}, seq<0>{}); } } CUTE_GCC_UNREACHABLE; } template <class T, class X> CUTE_HOST_DEVICE constexpr auto prepend(T const& a, X const& x) { if constexpr (is_tuple<T>::value) { return detail::construct(a, x, seq<>{}, seq<0>{}, make_seq<tuple_size<T>::value>{}); } else { return cute::make_tuple(x, a); } CUTE_GCC_UNREACHABLE; } // // Inclusive scan (prefix sum) // namespace detail { template <class T, class V, class F, int I, int... Is> CUTE_HOST_DEVICE constexpr auto iscan(T const& t, V const& v, F&& f, seq<I,Is...>) { // Apply the function to v and the element at I auto v_next = f(v, get<I>(t)); // Replace I with v_next auto t_next = replace<I>(t, v_next); #if 0 std::cout << "ISCAN i" << I << std::endl; std::cout << " t " << t << std::endl; std::cout << " i " << v << std::endl; std::cout << " f(i,t) " << v_next << std::endl; std::cout << " t_n " << t_next << std::endl; #endif if constexpr (sizeof...(Is) == 0) { return t_next; } else { return iscan(t_next, v_next, f, seq<Is...>{}); } CUTE_GCC_UNREACHABLE; } } // end namespace detail template <class T, class V, class F> CUTE_HOST_DEVICE constexpr auto iscan(T const& t, V const& v, F&& f) { return detail::iscan(t, v, f, tuple_seq<T>{}); } // // Exclusive scan (prefix sum) // namespace detail { template <class T, class V, class F, int I, int... 
Is> CUTE_HOST_DEVICE constexpr auto escan(T const& t, V const& v, F&& f, seq<I,Is...>) { if constexpr (sizeof...(Is) == 0) { // Replace I with v return replace<I>(t, v); } else { // Apply the function to v and the element at I auto v_next = f(v, get<I>(t)); // Replace I with v auto t_next = replace<I>(t, v); #if 0 std::cout << "ESCAN i" << I << std::endl; std::cout << " t " << t << std::endl; std::cout << " i " << v << std::endl; std::cout << " f(i,t) " << v_next << std::endl; std::cout << " t_n " << t_next << std::endl; #endif // Recurse return escan(t_next, v_next, f, seq<Is...>{}); } CUTE_GCC_UNREACHABLE; } } // end namespace detail template <class T, class V, class F> CUTE_HOST_DEVICE constexpr auto escan(T const& t, V const& v, F&& f) { return detail::escan(t, v, f, tuple_seq<T>{}); } // // Zip (Transpose) // // Take ((a,b,c,...),(x,y,z,...),...) rank-R0 x rank-R1 input // to produce ((a,x,...),(b,y,...),(c,z,...),...) rank-R1 x rank-R0 output namespace detail { template <int J, class... Ts> CUTE_HOST_DEVICE constexpr auto zip_(Ts const&... ts) { return cute::make_tuple(get<J>(ts)...); } template <class T, int... Is, int... Js> CUTE_HOST_DEVICE constexpr auto zip(T const& t, seq<Is...>, seq<Js...>) { static_assert(conjunction<bool_constant<tuple_size<tuple_element_t<0,T>>::value == tuple_size<tuple_element_t<Is,T>>::value>...>::value, "Mismatched Ranks"); return cute::make_tuple(zip_<Js>(get<Is>(t)...)...); } } // end namespace detail template <class T> CUTE_HOST_DEVICE constexpr auto zip(T const& t) { if constexpr (is_tuple<T>::value) { if constexpr (is_tuple<tuple_element_t<0,T>>::value) { return detail::zip(t, tuple_seq<T>{}, tuple_seq<tuple_element_t<0,T>>{}); } else { return cute::make_tuple(t); } } else { return t; } CUTE_GCC_UNREACHABLE; } // Convenient to pass them in separately template <class T0, class T1, class... Ts> CUTE_HOST_DEVICE constexpr auto zip(T0 const& t0, T1 const& t1, Ts const&... ts) { return zip(cute::make_tuple(t0, t1, ts...)); } // // zip2_by -- A guided zip for rank-2 tuples // Take a tuple like ((A,a),((B,b),(C,c)),d) // and produce a tuple ((A,(B,C)),(a,(b,c),d)) // where the rank-2 modes are selected by the terminals of the guide (X,(X,X)) // namespace detail { template <class T, class TG, int... Is, int... Js> CUTE_HOST_DEVICE constexpr auto zip2_by(T const& t, TG const& guide, seq<Is...>, seq<Js...>) { // zip2_by produces the modes like ((A,a),(B,b),...) auto split = cute::make_tuple(zip2_by(get<Is>(t), get<Is>(guide))...); // Rearrange and append missing modes from t to make ((A,B,...),(a,b,...,x,y)) return cute::make_tuple(cute::make_tuple(get<0>(get<Is>(split))...), cute::make_tuple(get<1>(get<Is>(split))..., get<Js>(t)...)); } } // end namespace detail template <class T, class TG> CUTE_HOST_DEVICE constexpr auto zip2_by(T const& t, TG const& guide) { if constexpr (is_tuple<TG>::value) { constexpr int TR = tuple_size<T>::value; constexpr int GR = tuple_size<TG>::value; static_assert(TR >= GR, "Mismatched ranks"); return detail::zip2_by(t, guide, make_range< 0, GR>{}, make_range<GR, TR>{}); } else { static_assert(tuple_size<T>::value == 2, "Mismatched ranks"); return t; } CUTE_GCC_UNREACHABLE; } /// @return A tuple of the elements of @c t in reverse order. template <class T> CUTE_HOST_DEVICE constexpr auto reverse(T const& t) { if constexpr (is_tuple<T>::value) { return detail::apply(t, [](auto const&... a){ return cute::make_tuple(a...); }, tuple_rseq<T>{}); } else { return t; } } } // end namespace cute
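// ---------------------------------------------------------------------------------------------
// Standalone usage sketch for a few of the algorithms above (not part of the header itself).
// The values are arbitrary; every call used here is defined in this file or in the included
// <cute/container/tuple.hpp>.
// ---------------------------------------------------------------------------------------------
#include <cute/algorithm/tuple_algorithms.hpp>
#include <iostream>

int main() {
  auto t = cute::make_tuple(1, cute::make_tuple(2, 3), 4);

  // flatten: (1,(2,3),4) -> (1,2,3,4)
  auto flat = cute::flatten(t);

  // transform: apply a callable elementwise -> (2,4,6,8)
  auto doubled = cute::transform(flat, [](auto x) { return x * 2; });

  // fold: left fold with an initial value -> ((((0+1)+2)+3)+4) = 10
  auto sum = cute::fold(flat, 0, [](auto acc, auto x) { return acc + x; });

  // front/back walk to the first/last non-tuple leaf of a hierarchical tuple.
  std::cout << "front=" << cute::front(t) << " back=" << cute::back(t)
            << " sum=" << sum << " doubled[0]=" << cute::get<0>(doubled) << "\n";
  return 0;
}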
cutlass/include/cute/algorithm/tuple_algorithms.hpp
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/mma.hpp> // Config #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && defined(__CUDA_ARCH_FEAT_SM90_ALL)) # define CUTE_ARCH_MMA_SM90A_ENABLED #endif namespace cute { //////////////////////////////////////////////////////////////////////////////////////////////////// // Warpgroup sync primitives CUTE_HOST_DEVICE void warpgroup_arrive() { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile ("wgmma.fence.sync.aligned;\n" ::: "memory"); #else CUTE_INVALID_CONTROL_PATH("Attempting to use wgmma.fence without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } template <int N> CUTE_HOST_DEVICE void warpgroup_wait() { static_assert(N >= 0 && N <= 7, "WGMMA wait: N must be in range [0, 7]"); #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile("wgmma.wait_group.sync.aligned %0;\n" :: "n"(N) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Attempting to use wgmma.wait_group<N> without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } // Marks the commit point for one or more sized batch of warpgroup MMAs. CUTE_HOST_DEVICE void warpgroup_commit_batch() { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile("wgmma.commit_group.sync.aligned;\n" ::: "memory"); #else CUTE_INVALID_CONTROL_PATH("Attempting to use wgmma.commit_group without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } CUTE_HOST_DEVICE void warpgroup_fence_operand(uint32_t& reg) { // MSVC emits a build error for 'asm volatile' // even if it only occurs in a __device__ function. // This prevents the error. 
#if defined(__CUDA_ARCH__) asm volatile("" : "+r"(reg) :: "memory"); #endif } CUTE_HOST_DEVICE void warpgroup_fence_operand(float& reg) { #if defined(__CUDA_ARCH__) asm volatile("" : "+f"(reg) :: "memory"); #endif } namespace GMMA { enum class Major { K = 0, MN = 1 }; enum class ScaleOut { Zero = 0, One = 1 }; enum class ScaleIn { Neg = -1, One = 1 }; } // namespace GMMA //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA PTX definitions: C = (scaleA * A) * (scaleB * B) + (scaleD * C) //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x16_F16F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %4, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k16.f16.f16.f16 " "{%0, %1}," " %2," " %3," " p, %5, %6, %7, %8;\n" "}\n" : "+r"(d0), "+r"(d1) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x16_F16F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %7, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k16.f16.f16.f16 " "{%0, %1}," "{%2, %3, %4, %5}," " %6," " p, %8, %9, %10;\n" "}\n" : "+r"(d0), "+r"(d1) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x16_F16F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm 
volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k16.f16.f16.f16 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8, %9, %10;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x16_F16F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k16.f16.f16.f16 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11, %12;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x16_F16F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12, %13, %14;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x16_F16F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; static_assert(tnspA == GMMA::Major::K, "Register source 
operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15, %16;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x16_F16F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20, %21, %22;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x16_F16F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" 
"setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23, %24;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x16_F16F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %26, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," " %24," " %25," " p, %27, %28, %29, %30;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x16_F16F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = 
GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %29, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," "{%24, %25, %26, %27}," " %28," " p, %30, %31, %32;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x16_F16F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36, %37, %38;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x16_F16F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, 
uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39, %40;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x16_F16F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52, %53, %54;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), 
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x16_F16F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55, %56;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; 
//////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x16_F16F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68, %69, %70;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x16 F16+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x16_F16F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; 
static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k16.f16.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71, %72;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x16_F32F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, 
%6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k16.f32.f16.f16 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8, %9, %10;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x16_F32F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k16.f32.f16.f16 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11, %12;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x16_F32F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12, %13, %14;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x16_F32F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t 
const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15, %16;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x16_F32F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20, %21, %22;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x16_F32F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23, %24;\n" "}\n" : 
"+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x16_F32F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36, %37, %38;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x16_F32F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if 
defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39, %40;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x16_F32F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52, %53, %54;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // 
GMMA 64x96x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x16_F32F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55, %56;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x16_F32F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & 
d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68, %69, %70;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x16_F32F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" 
"wgmma.mma_async.sync.aligned.m64n128k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71, %72;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x16_F32F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, 
%29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p, %99, %100, %101, %102;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x16_F32F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, 
float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p, %102, %103, %104;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x16_F32F16F16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, 
float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p, %131, %132, %133, %134;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "l"(desc_a), "l"(desc_b), 
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x16 F32+=F16*F16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x16_F32F16F16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k16.f32.f16.f16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, 
%115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p, %134, %135, %136;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x16_F32BF16BF16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k16.f32.bf16.bf16 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8, %9, %10;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x16_F32BF16BF16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; static_assert(tnspA == 
GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k16.f32.bf16.bf16 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11, %12;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x16_F32BF16BF16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12, %13, %14;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x16_F32BF16BF16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15, %16;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; 
//////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x16_F32BF16BF16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20, %21, %22;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x16_F32BF16BF16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23, %24;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x16_F32BF16BF16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using 
BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36, %37, %38;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x16_F32BF16BF16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39, %40;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), 
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x16_F32BF16BF16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52, %53, %54;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x16_F32BF16BF16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, 
float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55, %56;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x16_F32BF16BF16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " 
%48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68, %69, %70;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x16_F32BF16BF16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71, %72;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), 
"+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x16_F32BF16BF16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p, %99, %100, %101, %102;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), 
"+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x16_F32BF16BF16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, 
%81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p, %102, %103, %104;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x16_F32BF16BF16_SS { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & 
d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p, %131, %132, %133, %134;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x16 F32+=BF16*BF16 template < GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x16_F32BF16BF16_RS { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; static_assert(tnspA == GMMA::Major::K, "Register source operand A must have K major 
layout."); CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k16.f32.bf16.bf16 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p, %134, %135, %136;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), 
"+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x8_F32TF32TF32_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k8.f32.tf32.tf32 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x8_F32TF32TF32_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k8.f32.tf32.tf32 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x8 TN 
F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x8_F32TF32TF32_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x8_F32TF32TF32_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x8_F32TF32TF32_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), 
"n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x8_F32TF32TF32_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x8_F32TF32TF32_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x8_F32TF32TF32_SS_TN without 
CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x8_F32TF32TF32_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x8_F32TF32TF32_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, 
%39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x8_F32TF32TF32_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > 
struct SM90_64x128x8_F32TF32TF32_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x8_F32TF32TF32_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & 
d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x8_F32TF32TF32_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, 
float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p, %99, %100;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x8_F32TF32TF32_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & 
d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p, %102, %103;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x8_F32TF32TF32_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float 
& d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p, %131, %132;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), 
"+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x8 TN F32+=TF32*TF32 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x8_F32TF32TF32_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k8.f32.tf32.tf32 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, 
%97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p, %134, %135;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=S8*S8 struct SM90_64x8x32_S32S8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.s8 " "{%0, %1, %2, %3}," " %4," " %5," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=S8*S8 struct SM90_64x8x32_S32S8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, 
%6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3}," " %4," " %5," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=S8*S8 struct SM90_64x16x32_S32S8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=S8*S8 struct SM90_64x16x32_S32S8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=S8*S8 struct SM90_64x32x32_S32S8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), 
"+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=S8*S8 struct SM90_64x32x32_S32S8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=S8*S8 struct SM90_64x64x32_S32S8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=S8*S8 struct SM90_64x64x32_S32S8S8_SS_TN_SATURATE { using DRegisters = 
void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=S8*S8 struct SM90_64x96x32_S32S8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), 
"+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=S8*S8 struct SM90_64x96x32_S32S8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=S8*S8 struct SM90_64x128x32_S32S8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & 
d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=S8*S8 struct SM90_64x128x32_S32S8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & 
d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=S8*S8 struct SM90_64x192x32_S32S8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 
p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=S8*S8 struct SM90_64x192x32_S32S8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, 
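// Operand map for the m64n192 shapes (plain and .satfinite): %0-%95 are the accumulator
// registers (each thread of the 128-thread warpgroup holds 192/2 = 96 s32 values of the
// 64x192 tile), %96/%97 are the A/B shared-memory descriptors, and %98 is scale_D, turned
// into predicate p by the setp above. With p false (ScaleOut::Zero) the instruction computes
// D = A*B; with p true it computes D = A*B + D.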
uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=S8*S8 struct SM90_64x256x32_S32S8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, 
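// m64n256k32 is the widest N in this family: its 64x256 s32 accumulator occupies
// 256/2 = 128 registers per thread (CRegisters = uint32_t[128]), so register pressure is
// highest for this shape.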
uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), 
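// desc_a and desc_b are 64-bit GMMA shared-memory matrix descriptors (see cute's
// GmmaDescriptor); each one encodes, roughly, the smem start address together with
// stride/leading-dimension byte offsets and the swizzle mode, so the SS ops read both
// operand tiles directly from shared memory.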
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=S8*S8 struct SM90_64x256x32_S32S8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, 
%73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=S8*S8 struct SM90_64x8x32_S32S8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.s8 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=S8*S8 struct SM90_64x8x32_S32S8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, 
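// The *_RS_* ops in this block source the A operand from registers instead of shared memory:
// ARegisters = uint32_t[4] is each thread's 16-byte slice of the 64x32 s8 A tile
// (64*32 bytes / 128 threads), while B is still supplied through a shared-memory descriptor
// (desc_b). The accumulator layout matches the corresponding *_SS_* op.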
uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=S8*S8 struct SM90_64x16x32_S32S8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=S8*S8 struct SM90_64x16x32_S32S8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=S8*S8 struct SM90_64x32x32_S32S8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const 
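// Usage sketch (illustrative only, not part of this header; assumes the matching
// cute::MMA_Traits specializations for these ops are available, and tCrA/tCrB/tCrC are
// placeholder fragment names): kernels normally do not call fma() directly but wrap an op
// in a TiledMMA, e.g.
//   auto tiled_mma = cute::make_tiled_mma(SM90_64x32x32_S32S8S8_RS_TN{});
// partition the operands through that TiledMMA, and then let
//   cute::gemm(tiled_mma, tCrA, tCrB, tCrC);
// issue the wgmma shown here.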
scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=S8*S8 struct SM90_64x32x32_S32S8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=S8*S8 struct SM90_64x64x32_S32S8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), 
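// DRegisters is void for these ops because D is updated in place: the "+r" constraints
// declare each accumulator register as both input and output of the inline asm, while "l"
// binds the 64-bit descriptor operands and "r" the 32-bit scale_D value.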
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=S8*S8 struct SM90_64x64x32_S32S8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=S8*S8 struct SM90_64x96x32_S32S8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t 
& d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=S8*S8 struct SM90_64x96x32_S32S8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), 
"+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=S8*S8 struct SM90_64x128x32_S32S8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=S8*S8 struct SM90_64x128x32_S32S8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters 
= uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=S8*S8 struct SM90_64x192x32_S32S8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, 
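// All of these wrappers emit PTX only when CUTE_ARCH_MMA_SM90A_ENABLED is defined, i.e. when
// compiling for the sm_90a architecture-specific target; otherwise the #else branch reaches
// CUTE_INVALID_CONTROL_PATH, which reports the unsupported use instead of emitting wgmma.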
uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=S8*S8 struct SM90_64x192x32_S32S8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& 
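// Only TN forms appear for the 8-bit integer shapes: the s8/u8 wgmma instructions take A
// K-major and B K-major with no transpose selectors, so unlike the f16/bf16 ops there are
// no NT/TT/NN variants to wrap here.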
a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else 
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=S8*S8 struct SM90_64x256x32_S32S8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, 
%108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=S8*S8 struct SM90_64x256x32_S32S8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, 
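// For the m64n256 RS shapes the asm operands are ordered: %0-%127 accumulators, %128-%131
// the four A-fragment registers, %132 the B descriptor, and %133 scale_D (consumed by the
// setp that defines predicate p).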
uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "r"(a000), 
"r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=S8*U8 struct SM90_64x8x32_S32S8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.u8 " "{%0, %1, %2, %3}," " %4," " %5," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=S8*U8 struct SM90_64x8x32_S32S8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3}," " %4," " %5," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=S8*U8 struct SM90_64x16x32_S32S8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=S8*U8 struct SM90_64x16x32_S32S8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & 
d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=S8*U8 struct SM90_64x32x32_S32S8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=S8*U8 struct SM90_64x32x32_S32S8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=S8*U8 struct SM90_64x64x32_S32S8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void 
fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=S8*U8 struct SM90_64x64x32_S32S8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=S8*U8 struct SM90_64x96x32_S32S8U8_SS_TN { using DRegisters = 
void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=S8*U8 struct SM90_64x96x32_S32S8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, 
%5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=S8*U8 struct SM90_64x128x32_S32S8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), 
"+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=S8*U8 struct SM90_64x128x32_S32S8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=S8*U8 struct SM90_64x192x32_S32S8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, 
uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); 
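    /*
     * [Editorial note, not part of the original header.] The SS_TN ops in this file all follow
     * the same pattern: A and B are described by 64-bit shared-memory matrix descriptors, the
     * accumulator lives in registers, and scale_D selects whether the wgmma accumulates onto D
     * (GMMA::ScaleOut::One) or overwrites it (GMMA::ScaleOut::Zero). A minimal sketch of driving
     * one of these ops directly is shown below; it assumes the descriptors have already been
     * built elsewhere and that CuTe's warpgroup_* helpers from its SM90 support are available.
     *
     *   __device__ void mma_step(uint64_t desc_a, uint64_t desc_b, uint32_t (&acc)[4])
     *   {
     *     cute::warpgroup_arrive();                    // fence accumulator registers before the async MMA
     *     cute::SM90_64x8x32_S32S8U8_SS_TN::fma(
     *         desc_a, desc_b,
     *         acc[0], acc[1], acc[2], acc[3],
     *         cute::GMMA::ScaleOut::One);              // One: accumulate onto acc; Zero: overwrite it
     *     cute::warpgroup_commit_batch();
     *     cute::warpgroup_wait<0>();                   // wait for the committed wgmma group to retire
     *   }
     */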
#endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=S8*U8 struct SM90_64x192x32_S32S8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), 
"+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=S8*U8 struct SM90_64x256x32_S32S8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " 
%72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=S8*U8 struct SM90_64x256x32_S32S8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & 
d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), 
"+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=S8*U8 struct SM90_64x8x32_S32S8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.u8 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=S8*U8 struct SM90_64x8x32_S32S8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=S8*U8 struct SM90_64x16x32_S32S8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=S8*U8 struct 
SM90_64x16x32_S32S8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=S8*U8 struct SM90_64x32x32_S32S8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=S8*U8 struct SM90_64x32x32_S32S8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), 
"+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=S8*U8 struct SM90_64x64x32_S32S8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=S8*U8 struct SM90_64x64x32_S32S8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, 
%26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=S8*U8 struct SM90_64x96x32_S32S8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=S8*U8 struct SM90_64x96x32_S32S8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & 
d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=S8*U8 struct SM90_64x128x32_S32S8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if 
defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=S8*U8 struct SM90_64x128x32_S32S8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p;\n" "}\n" : "+r"(d00), 
"+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=S8*U8 struct SM90_64x192x32_S32S8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, 
%85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=S8*U8 struct SM90_64x192x32_S32S8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" 
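// The setp above turns the final "r" operand (%101 for this shape) into predicate p, which the
// wgmma consumes as its scale-d input: GMMA::ScaleOut::Zero gives D = A*B, while ScaleOut::One
// accumulates D = A*B + D. The .satfinite suffix clamps the s32 accumulators to the representable
// range instead of letting them wrap on overflow.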
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=S8*U8 struct SM90_64x256x32_S32S8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, 
uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), 
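// Input bindings: the four A fragments use 32-bit "r" constraints, desc_b is the 64-bit
// shared-memory descriptor bound with "l", and ScaleOut is widened to int32_t below so it can be
// passed as the plain 32-bit operand read by the setp at the top of the asm block.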
"r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=S8*U8 struct SM90_64x256x32_S32S8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, 
%102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=U8*S8 struct SM90_64x8x32_S32U8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.s8 " "{%0, %1, %2, %3}," " %4," " %5," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=U8*S8 struct SM90_64x8x32_S32U8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.s8.satfinite " "{%0, 
%1, %2, %3}," " %4," " %5," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=U8*S8 struct SM90_64x16x32_S32U8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=U8*S8 struct SM90_64x16x32_S32U8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=U8*S8 struct SM90_64x32x32_S32U8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else 
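// Fallback branch taken when the translation unit is not built with CUTE_ARCH_MMA_SM90A_ENABLED
// (i.e. not targeting sm_90a): the guard below reports the unsupported instruction instead of
// silently doing nothing.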
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=U8*S8 struct SM90_64x32x32_S32U8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=U8*S8 struct SM90_64x64x32_S32U8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=U8*S8 struct SM90_64x64x32_S32U8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; 
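// Register-footprint conventions shared by these wrappers: DRegisters is void because wgmma
// accumulates in place (D aliases the C fragment); in the _SS_ variants ARegisters and BRegisters
// are each a single 64-bit shared-memory matrix descriptor; CRegisters is the per-thread s32
// accumulator fragment (64x64 accumulators spread over the 128 threads of the warpgroup gives
// 32 registers per thread for this shape).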
using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=U8*S8 struct SM90_64x96x32_S32U8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), 
"+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=U8*S8 struct SM90_64x96x32_S32U8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=U8*S8 struct SM90_64x128x32_S32U8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, 
uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=U8*S8 struct SM90_64x128x32_S32U8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if 
defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=U8*S8 struct SM90_64x192x32_S32U8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.s8 
" "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=U8*S8 struct SM90_64x192x32_S32U8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, 
uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=U8*S8 struct SM90_64x256x32_S32U8S8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & 
d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), 
"+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=U8*S8 struct SM90_64x256x32_S32U8S8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, 
%85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=U8*S8 struct SM90_64x8x32_S32U8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.s8 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=U8*S8 struct SM90_64x8x32_S32U8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = 
GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=U8*S8 struct SM90_64x16x32_S32U8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=U8*S8 struct SM90_64x16x32_S32U8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=U8*S8 struct SM90_64x32x32_S32U8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if 
defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=U8*S8 struct SM90_64x32x32_S32U8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=U8*S8 struct SM90_64x64x32_S32U8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), 
"+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=U8*S8 struct SM90_64x64x32_S32U8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=U8*S8 struct SM90_64x96x32_S32U8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t 
& d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=U8*S8 struct SM90_64x96x32_S32U8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), 
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=U8*S8 struct SM90_64x128x32_S32U8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=U8*S8 struct SM90_64x128x32_S32U8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE 
static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=U8*S8 struct SM90_64x192x32_S32U8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, 
uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=U8*S8 struct SM90_64x192x32_S32U8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, 
uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to 
use SM90_64x192x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=U8*S8 struct SM90_64x256x32_S32U8S8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.s8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, 
%114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=U8*S8 struct SM90_64x256x32_S32U8S8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, 
uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.s8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), 
"l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=U8*U8 struct SM90_64x8x32_S32U8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.u8 " "{%0, %1, %2, %3}," " %4," " %5," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=U8*U8 struct SM90_64x8x32_S32U8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3}," " %4," " %5," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=U8*U8 struct SM90_64x16x32_S32U8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=U8*U8 struct SM90_64x16x32_S32U8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, 
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=U8*U8 struct SM90_64x32x32_S32U8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=U8*U8 struct SM90_64x32x32_S32U8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=U8*U8 struct SM90_64x64x32_S32U8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, 
uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=U8*U8 struct SM90_64x64x32_S32U8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=U8*U8 struct SM90_64x96x32_S32U8U8_SS_TN { using DRegisters = void; using ARegisters = 
uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=U8*U8 struct SM90_64x96x32_S32U8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, 
%10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=U8*U8 struct SM90_64x128x32_S32U8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), 
"+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=U8*U8 struct SM90_64x128x32_S32U8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=U8*U8 struct SM90_64x192x32_S32U8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, 
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; 
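////////////////////////////////////////////////////////////////////////////////////////////////////

// Illustrative usage sketch (editorial addition, not part of the generated op list above).
// Every *_SS_TN op in this family shares the same calling convention: two 64-bit shared-memory
// matrix descriptors for A and B, the per-thread accumulator fragment passed by reference, and a
// GMMA::ScaleOut flag selecting D = A*B (Zero) versus D = A*B + D (One). The helper below uses the
// small SM90_64x8x32_S32U8U8_SS_TN tile purely to keep the accumulator list short. It assumes the
// descriptors have already been built elsewhere (e.g. by CuTe's GMMA descriptor machinery); in real
// kernels the call is bracketed by the warpgroup fence/commit/wait helpers and is normally reached
// through MMA_Atom / TiledMMA rather than invoked directly.

CUTE_HOST_DEVICE void
example_wgmma_u8u8_64x8x32(uint64_t desc_a,      // GMMA shared-memory descriptor for A (built elsewhere)
                           uint64_t desc_b,      // GMMA shared-memory descriptor for B (built elsewhere)
                           uint32_t (&acc)[4])   // per-thread S32 accumulator fragment (CRegisters)
{
  // Accumulate into the existing fragment (ScaleOut::One); pass ScaleOut::Zero to overwrite it.
  SM90_64x8x32_S32U8U8_SS_TN::fma(desc_a, desc_b,
                                  acc[0], acc[1], acc[2], acc[3],
                                  GMMA::ScaleOut::One);
}

////////////////////////////////////////////////////////////////////////////////////////////////////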
//////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=U8*U8 struct SM90_64x192x32_S32U8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), 
"+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=U8*U8 struct SM90_64x256x32_S32U8U8_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, 
%74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=U8*U8 struct SM90_64x256x32_S32U8U8_SS_TN_SATURATE { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, 
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), 
"+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=U8*U8 struct SM90_64x8x32_S32U8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.u8 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN S32+=U8*U8 struct SM90_64x8x32_S32U8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=U8*U8 struct SM90_64x16x32_S32U8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN S32+=U8*U8 struct 
SM90_64x16x32_S32U8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=U8*U8 struct SM90_64x32x32_S32U8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN S32+=U8*U8 struct SM90_64x32x32_S32U8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), 
"+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=U8*U8 struct SM90_64x64x32_S32U8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN S32+=U8*U8 struct SM90_64x64x32_S32U8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, 
%26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=U8*U8 struct SM90_64x96x32_S32U8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN S32+=U8*U8 struct SM90_64x96x32_S32U8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & 
d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=U8*U8 struct SM90_64x128x32_S32U8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if 
defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN S32+=U8*U8 struct SM90_64x128x32_S32U8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p;\n" "}\n" : "+r"(d00), 
"+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=U8*U8 struct SM90_64x192x32_S32U8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, 
%85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN S32+=U8*U8 struct SM90_64x192x32_S32U8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67, uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71, uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75, uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79, uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83, uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87, uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91, uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" 
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63), "+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67), "+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71), "+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75), "+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79), "+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83), "+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87), "+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91), "+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=U8*U8 struct SM90_64x256x32_S32U8U8_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, 
uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.u8 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), 
"r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN S32+=U8*U8 struct SM90_64x256x32_S32U8U8_RS_TN_SATURATE { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003, uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007, uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011, uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015, uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019, uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023, uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027, uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031, uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035, uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039, uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043, uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047, uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051, uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055, uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059, uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063, uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067, uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071, uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075, uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079, uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083, uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087, uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091, uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095, uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099, uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103, uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107, uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111, uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115, uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119, uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123, uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.u8.satfinite " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, 
%102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p;\n" "}\n" : "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003), "+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007), "+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011), "+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015), "+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019), "+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023), "+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027), "+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031), "+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035), "+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039), "+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043), "+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047), "+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051), "+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055), "+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059), "+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063), "+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067), "+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071), "+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075), "+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079), "+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083), "+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087), "+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091), "+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095), "+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099), "+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103), "+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107), "+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111), "+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115), "+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119), "+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123), "+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F16E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %4, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f16.e4m3.e4m3 " "{%0, %1}," " %2," " %3," " p, %5, %6;\n" "}\n" : "+r"(d0), "+r"(d1) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F16E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & 
d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %7, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f16.e4m3.e4m3 " "{%0, %1}," "{%2, %3, %4, %5}," " %6," " p, %8, %9;\n" "}\n" : "+r"(d0), "+r"(d1) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F32E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F32E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F16E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3}," " %4," " %5," " 
p, %7, %8;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F16E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F32E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F32E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, 
%15;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F16E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F16E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F32E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = 
GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F32E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F16E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), 
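// Note on the operand bindings used by every atom in this block: the "+r"/"+f"
// constraints update the per-thread accumulator fragment in place, "l" passes a
// 64-bit shared-memory matrix descriptor, scale_D is a runtime value that drives
// the predicate `p` (wgmma's scale-d bit: keep vs. discard the prior accumulator),
// and scaleA/scaleB are compile-time immediates ("n") folded into the instruction.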
"n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F16E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F32E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use 
SM90_64x64x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F32E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F16E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %26, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," " %24," " %25," " p, %27, %28;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), 
"+r"(d21), "+r"(d22), "+r"(d23) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F16E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %29, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," "{%24, %25, %26, %27}," " %28," " p, %30, %31;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F32E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, 
%27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F32E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA 
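// Atom naming follows SM90_64x<N>x32_<D><A><B>_<SS|RS>_TN: 64x<N>x32 is the
// MxNxK tile shape, <D> the accumulator type (F16 packed two per uint32_t, or
// F32), <A>/<B> the FP8 operand formats (E4M3/E5M2), SS/RS whether the A operand
// is read via a shared-memory descriptor or supplied from registers (B is always
// a descriptor here), and TN the K-major A / K-major B arrangement used by FP8 WGMMA.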
= GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F16E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F16E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), 
"+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F32E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F32E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = 
uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F16E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, 
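// Each uint32_t accumulator argument packs two f16 values, so the 48 registers
// of this 64x192 atom cover the 96 accumulator elements owned by each of the
// 128 threads in the warpgroup (64*192 / 128 = 96).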
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F16E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), 
"+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F32E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p, %99, %100;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), 
"+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F32E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p, %102, %103;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), 
"+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F16E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), 
"+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F16+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F16E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f16.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), 
"+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F32E4M3E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " 
p, %131, %132;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F32+=E4M3*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F32E4M3E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, 
float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f32.e4m3.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p, %134, %135;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = 
GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F16E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %4, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f16.e4m3.e5m2 " "{%0, %1}," " %2," " %3," " p, %5, %6;\n" "}\n" : "+r"(d0), "+r"(d1) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F16E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %7, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f16.e4m3.e5m2 " "{%0, %1}," "{%2, %3, %4, %5}," " %6," " p, %8, %9;\n" "}\n" : "+r"(d0), "+r"(d1) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F32E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F32E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & 
d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F16E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F16E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F32E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" 
".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F32E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F16E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F16E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & 
d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F32E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F32E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to 
use SM90_64x32x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F16E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F16E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F32E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE 
static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F32E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; 
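////////////////////////////////////////////////////////////////////////////////////////////////////

// Usage sketch (illustrative only, not part of the atom definitions in this header).
// The SS/RS structs above are thin wrappers over the SM90 wgmma PTX instructions; in practice they
// are consumed through CuTe's MMA_Atom / TiledMMA machinery rather than by calling fma() directly.
// The snippet below is a minimal, hedged sketch: it assumes the companion CuTe headers
// (<cute/tensor.hpp>, <cute/atom/mma_atom.hpp>) and the matching MMA_Traits specializations are
// available, and the fragment names tCrA/tCrB/tCrC are hypothetical placeholders for tensors
// produced by the usual SM90 partitioning steps. It is kept inside #if 0 so this header's
// contents are unaffected.
#if 0
#include <cute/tensor.hpp>
#include <cute/atom/mma_atom.hpp>

using namespace cute;

// Pick one of the FP8 atoms defined above: 64x64x32, F32 += E4M3 * E5M2, with both operands
// described by shared-memory descriptors (SS), TN layout. The template defaults supply
// GMMA::ScaleIn::One for both A and B.
using GmmaOp   = SM90_64x64x32_F32E4M3E5M2_SS_TN<>;
using TiledMma = decltype(make_tiled_mma(GmmaOp{}));   // one warpgroup-wide MMA tile

// Inside a kernel mainloop (fragments and the k-block loop elided):
//   TiledMma tiled_mma;
//   auto thr_mma = tiled_mma.get_thread_slice(threadIdx.x);
//   cute::gemm(tiled_mma, tCrA, tCrB, tCrC);   // accumulates into 32 float registers per thread
#endif
// Descriptor construction and the warpgroup fence/arrive/commit/wait sequencing are omitted here;
// they follow the standard CuTe SM90 mainloop pattern and do not change per atom.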
//////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F16E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %26, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," " %24," " %25," " p, %27, %28;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F16E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %29, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," "{%24, %25, %26, %27}," " %28," " p, %30, %31;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E4M3E5M2_RS_TN without 
CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F32E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F32E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & 
d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F16E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = 
GMMA::ScaleIn::One > struct SM90_64x128x32_F16E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F32E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, 
%27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F32E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), 
"+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F16E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F16E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, 
uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F32E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & 
d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p, %99, %100;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F32E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, 
float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p, %102, %103;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F16E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, 
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F16+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F16E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, 
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f16.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F32E4M3E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & 
d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p, %131, %132;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F32+=E4M3*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct 
SM90_64x256x32_F32E4M3E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f32.e4m3.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p, %134, %135;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), 
"+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F16E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %4, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f16.e5m2.e4m3 " "{%0, %1}," " %2," " %3," " p, %5, %6;\n" "}\n" : "+r"(d0), "+r"(d1) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F16E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %7, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f16.e5m2.e4m3 " "{%0, %1}," "{%2, %3, %4, %5}," " %6," " p, %8, %9;\n" "}\n" : "+r"(d0), "+r"(d1) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; 
////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x8x32 TN F32+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E5M2E4M3_SS_TN
{
  using DRegisters = void;

  using ARegisters = uint64_t[1];
  using BRegisters = uint64_t[1];
  using CRegisters = float[4];

  CUTE_HOST_DEVICE static void
  fma(uint64_t const& desc_a,
      uint64_t const& desc_b,
      float & d0, float & d1, float & d2, float & d3,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %6, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n8k32.f32.e5m2.e4m3 "
      "{%0, %1, %2, %3},"
      " %4,"
      " %5,"
      " p, %7, %8;\n"
    "}\n"
      : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
      :  "l"(desc_a), "l"(desc_b),
         "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x8x32 TN F32+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E5M2E4M3_RS_TN
{
  using DRegisters = void;

  using ARegisters = uint32_t[4];
  using BRegisters = uint64_t[1];
  using CRegisters = float[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint64_t const& desc_b,
      float & d0, float & d1, float & d2, float & d3,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %9, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n8k32.f32.e5m2.e4m3 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      " %8,"
      " p, %10, %11;\n"
    "}\n"
      : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
      :  "r"(a0), "r"(a1), "r"(a2), "r"(a3),
         "l"(desc_b),
         "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x16x32 TN F16+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E5M2E4M3_SS_TN
{
  using DRegisters = void;

  using ARegisters = uint64_t[1];
  using BRegisters = uint64_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint64_t const& desc_a,
      uint64_t const& desc_b,
      uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %6, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n16k32.f16.e5m2.e4m3 "
      "{%0, %1, %2, %3},"
      " %4,"
      " %5,"
      " p, %7, %8;\n"
    "}\n"
      : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
      :  "l"(desc_a), "l"(desc_b),
         "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x16x32 TN F16+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct
SM90_64x16x32_F16E5M2E4M3_RS_TN
{
  using DRegisters = void;

  using ARegisters = uint32_t[4];
  using BRegisters = uint64_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint64_t const& desc_b,
      uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %9, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n16k32.f16.e5m2.e4m3 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      " %8,"
      " p, %10, %11;\n"
    "}\n"
      : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
      :  "r"(a0), "r"(a1), "r"(a2), "r"(a3),
         "l"(desc_b),
         "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x16x32 TN F32+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E5M2E4M3_SS_TN
{
  using DRegisters = void;

  using ARegisters = uint64_t[1];
  using BRegisters = uint64_t[1];
  using CRegisters = float[8];

  CUTE_HOST_DEVICE static void
  fma(uint64_t const& desc_a,
      uint64_t const& desc_b,
      float & d0, float & d1, float & d2, float & d3,
      float & d4, float & d5, float & d6, float & d7,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %10, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n16k32.f32.e5m2.e4m3 "
      "{%0, %1, %2, %3, %4, %5, %6, %7},"
      " %8,"
      " %9,"
      " p, %11, %12;\n"
    "}\n"
      : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
        "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
      :  "l"(desc_a), "l"(desc_b),
         "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x16x32 TN F32+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E5M2E4M3_RS_TN
{
  using DRegisters = void;

  using ARegisters = uint32_t[4];
  using BRegisters = uint64_t[1];
  using CRegisters = float[8];

  CUTE_HOST_DEVICE static void
  fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint64_t const& desc_b,
      float & d0, float & d1, float & d2, float & d3,
      float & d4, float & d5, float & d6, float & d7,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %13, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n16k32.f32.e5m2.e4m3 "
      "{%0, %1, %2, %3, %4, %5, %6, %7},"
      "{%8, %9, %10, %11},"
      " %12,"
      " p, %14, %15;\n"
    "}\n"
      : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
        "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
      :  "r"(a0), "r"(a1), "r"(a2), "r"(a3),
         "l"(desc_b),
         "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x32x32 TN F16+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA =
GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E5M2E4M3_SS_TN
{
  using DRegisters = void;

  using ARegisters = uint64_t[1];
  using BRegisters = uint64_t[1];
  using CRegisters = uint32_t[8];

  CUTE_HOST_DEVICE static void
  fma(uint64_t const& desc_a,
      uint64_t const& desc_b,
      uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %10, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n32k32.f16.e5m2.e4m3 "
      "{%0, %1, %2, %3, %4, %5, %6, %7},"
      " %8,"
      " %9,"
      " p, %11, %12;\n"
    "}\n"
      : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
        "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
      :  "l"(desc_a), "l"(desc_b),
         "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x32x32 TN F16+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E5M2E4M3_RS_TN
{
  using DRegisters = void;

  using ARegisters = uint32_t[4];
  using BRegisters = uint64_t[1];
  using CRegisters = uint32_t[8];

  CUTE_HOST_DEVICE static void
  fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint64_t const& desc_b,
      uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %13, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n32k32.f16.e5m2.e4m3 "
      "{%0, %1, %2, %3, %4, %5, %6, %7},"
      "{%8, %9, %10, %11},"
      " %12,"
      " p, %14, %15;\n"
    "}\n"
      : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
        "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
      :  "r"(a0), "r"(a1), "r"(a2), "r"(a3),
         "l"(desc_b),
         "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// GMMA 64x32x32 TN F32+=E5M2*E4M3
template <
  GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
  GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E5M2E4M3_SS_TN
{
  using DRegisters = void;

  using ARegisters = uint64_t[1];
  using BRegisters = uint64_t[1];
  using CRegisters = float[16];

  CUTE_HOST_DEVICE static void
  fma(uint64_t const& desc_a,
      uint64_t const& desc_b,
      float & d00, float & d01, float & d02, float & d03,
      float & d04, float & d05, float & d06, float & d07,
      float & d08, float & d09, float & d10, float & d11,
      float & d12, float & d13, float & d14, float & d15,
      GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
  {
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
    asm volatile(
    "{\n"
      ".reg .pred p;\n"
      "setp.ne.b32 p, %18, 0;\n"
      "wgmma.mma_async.sync.aligned.m64n32k32.f32.e5m2.e4m3 "
      "{%0, %1, %2, %3, %4, %5, %6, %7, "
      " %8, %9, %10, %11, %12, %13, %14, %15},"
      " %16,"
      " %17,"
      " p, %19, %20;\n"
    "}\n"
      : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
        "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
      :  "l"(desc_a), "l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F32E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F16E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F16E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = 
uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F32E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F32E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void 
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F16E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %26, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," " %24," " %25," " p, %27, %28;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct 
SM90_64x96x32_F16E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %29, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," "{%24, %25, %26, %27}," " %28," " p, %30, %31;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F32E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), 
"+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F32E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F16E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & 
d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F16E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 
TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F32E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F32E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float 
& d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F16E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" 
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F16E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), 
"n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F32E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p, %99, %100;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), 
"+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F32E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p, %102, %103;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), 
"+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F16E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), 
"+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F16+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F16E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f16.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F32+=E5M2*E4M3 template < 
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F32E5M2E4M3_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p, %131, %132;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), 
"+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F32+=E5M2*E4M3 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F32E5M2E4M3_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & 
d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e4m3 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p, %134, %135;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F16E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" 
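/* Added note (illustrative, using this 64x8x32 F16 SS atom as the example; the same
   pattern holds for every wgmma wrapper in this family): asm operands are numbered in
   declaration order -- the D accumulator registers first, then A (an SMEM descriptor
   for _SS_ atoms, four .b32 registers for _RS_ atoms), then the B descriptor, then
   scale_D, scaleA, scaleB. The setp below turns scale_D into predicate p, which the
   wgmma instruction uses to choose D = A*B (scale_D == Zero) versus D = A*B + D
   (scale_D == One); scaleA/scaleB are +/-1 immediates ("n" constraints) selecting the
   sign applied to the corresponding input (GMMA::ScaleIn::One or ::Neg). Here:
   %0-%1 = D, %2 = desc_a, %3 = desc_b, %4 = scale_D, %5 = scaleA, %6 = scaleB. */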
"setp.ne.b32 p, %4, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f16.e5m2.e5m2 " "{%0, %1}," " %2," " %3," " p, %5, %6;\n" "}\n" : "+r"(d0), "+r"(d1) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F16E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %7, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f16.e5m2.e5m2 " "{%0, %1}," "{%2, %3, %4, %5}," " %6," " p, %8, %9;\n" "}\n" : "+r"(d0), "+r"(d1) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F32E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x8x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x8x32_F32E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n8k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), 
"n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F16E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %6, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3}," " %4," " %5," " p, %7, %8;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F16E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %9, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," " %8," " p, %10, %11;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F32E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E5M2E5M2_SS_TN without 
CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x16x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x16x32_F32E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, float & d0, float & d1, float & d2, float & d3, float & d4, float & d5, float & d6, float & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n16k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15;\n" "}\n" : "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3), "+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7) : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F16E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %10, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7}," " %8," " %9," " p, %11, %12;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F16E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[8]; CUTE_HOST_DEVICE static void fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, uint64_t const& desc_b, uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %13, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7}," "{%8, %9, %10, %11}," " %12," " p, %14, %15;\n" "}\n" : "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3), "+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7) : 
"r"(a0), "r"(a1), "r"(a2), "r"(a3), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F32E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x32x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x32x32_F32E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n32k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F16E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = 
uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %18, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," " %16," " %17," " p, %19, %20;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F16E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[16]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %21, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15}," "{%16, %17, %18, %19}," " %20," " p, %22, %23;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F32E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, 
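/* Added sizing note (illustrative, derived from the tile shapes in this file): the
   d00..d31 parameters around this point are the per-thread slice of the 64x64
   accumulator, which a warpgroup of 128 threads splits as M*N/128 = 64*64/128 = 32
   floats for an f32 atom; the f16 siblings pack two halves per 32-bit register and
   therefore carry M*N/256 values, e.g. uint32_t[16] for the 64x64 shape above. */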
float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x64x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x64x32_F32E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n64k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F16E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, 
uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %26, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," " %24," " %25," " p, %27, %28;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F16E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[24]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %29, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23}," "{%24, %25, %26, %27}," " %28," " p, %30, %31;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F32E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, 
float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x96x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x96x32_F32E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n96k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, 
%55;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F16E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %34, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," " %32," " %33," " p, %35, %36;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F16E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[32]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, 
uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %37, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31}," "{%32, %33, %34, %35}," " %36," " p, %38, %39;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F32E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), 
"+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x128x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x128x32_F32E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n128k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E5M2E5M2_RS_TN without 
CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F16E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %50, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," " %48," " %49," " p, %51, %52;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F16E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[48]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, 
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %53, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47}," "{%48, %49, %50, %51}," " %52," " p, %54, %55;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F32E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %98, 0;\n" 
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," " %96," " %97," " p, %99, %100;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x192x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x192x32_F32E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[96]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, float & d00, float & d01, float & d02, float & d03, float & d04, float & d05, float & d06, float & d07, float & d08, float & d09, float & d10, float & d11, float & d12, float & d13, float & d14, float & d15, float & d16, float & d17, float & d18, float & d19, float & d20, float & d21, float & d22, float & d23, float & d24, float & d25, float & d26, float & d27, float & d28, float & d29, float & d30, float & d31, float & d32, float & d33, float & d34, float & d35, float & d36, float & d37, float & d38, float & d39, float & d40, float & d41, float & d42, float & d43, float & d44, float & d45, float & d46, float & d47, float & d48, float & d49, float & d50, float & d51, float & d52, float & d53, float & d54, float & d55, float & d56, float & d57, float & d58, float & d59, float & d60, float & d61, float & d62, float & d63, float & d64, float & d65, float & d66, float & d67, float & d68, float & d69, float & d70, float & d71, float & d72, float & d73, float & d74, float & d75, float & d76, float & d77, float & d78, float & d79, float & d80, float & d81, float & d82, float & d83, float & d84, float & 
d85, float & d86, float & d87, float & d88, float & d89, float & d90, float & d91, float & d92, float & d93, float & d94, float & d95, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %101, 0;\n" "wgmma.mma_async.sync.aligned.m64n192k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95}," "{%96, %97, %98, %99}," " %100," " p, %102, %103;\n" "}\n" : "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03), "+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07), "+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11), "+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15), "+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19), "+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23), "+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27), "+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31), "+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35), "+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39), "+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43), "+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47), "+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51), "+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55), "+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59), "+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63), "+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67), "+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71), "+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75), "+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79), "+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83), "+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87), "+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91), "+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F16E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & 
d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %66, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," " %64," " %65," " p, %67, %68;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F16+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F16E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = uint32_t[64]; CUTE_HOST_DEVICE static void fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03, uint64_t const& desc_b, uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03, uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07, uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11, uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15, uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19, uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23, uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27, uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31, uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35, uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39, uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43, uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47, uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51, uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55, uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59, uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %69, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f16.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, 
%9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63}," "{%64, %65, %66, %67}," " %68," " p, %70, %71;\n" "}\n" : "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03), "+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07), "+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11), "+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15), "+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19), "+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23), "+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27), "+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31), "+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35), "+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39), "+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43), "+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47), "+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51), "+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55), "+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59), "+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63) : "r"(a00), "r"(a01), "r"(a02), "r"(a03), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F32E5M2E5M2_SS_TN { using DRegisters = void; using ARegisters = uint64_t[1]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint64_t const& desc_a, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & 
d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %130, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," " %128," " %129," " p, %131, %132;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "l"(desc_a), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // GMMA 64x256x32 TN F32+=E5M2*E5M2 template < GMMA::ScaleIn scaleA = GMMA::ScaleIn::One, GMMA::ScaleIn scaleB = GMMA::ScaleIn::One > struct SM90_64x256x32_F32E5M2E5M2_RS_TN { using DRegisters = void; using ARegisters = uint32_t[4]; using BRegisters = uint64_t[1]; using CRegisters = float[128]; CUTE_HOST_DEVICE static void fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003, uint64_t const& desc_b, float & d000, float & d001, float & d002, float & d003, float & d004, float & d005, float & d006, float & d007, float & d008, float & d009, float & d010, float & d011, float & d012, float & d013, float & d014, float & d015, 
float & d016, float & d017, float & d018, float & d019, float & d020, float & d021, float & d022, float & d023, float & d024, float & d025, float & d026, float & d027, float & d028, float & d029, float & d030, float & d031, float & d032, float & d033, float & d034, float & d035, float & d036, float & d037, float & d038, float & d039, float & d040, float & d041, float & d042, float & d043, float & d044, float & d045, float & d046, float & d047, float & d048, float & d049, float & d050, float & d051, float & d052, float & d053, float & d054, float & d055, float & d056, float & d057, float & d058, float & d059, float & d060, float & d061, float & d062, float & d063, float & d064, float & d065, float & d066, float & d067, float & d068, float & d069, float & d070, float & d071, float & d072, float & d073, float & d074, float & d075, float & d076, float & d077, float & d078, float & d079, float & d080, float & d081, float & d082, float & d083, float & d084, float & d085, float & d086, float & d087, float & d088, float & d089, float & d090, float & d091, float & d092, float & d093, float & d094, float & d095, float & d096, float & d097, float & d098, float & d099, float & d100, float & d101, float & d102, float & d103, float & d104, float & d105, float & d106, float & d107, float & d108, float & d109, float & d110, float & d111, float & d112, float & d113, float & d114, float & d115, float & d116, float & d117, float & d118, float & d119, float & d120, float & d121, float & d122, float & d123, float & d124, float & d125, float & d126, float & d127, GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One) { #if defined(CUTE_ARCH_MMA_SM90A_ENABLED) asm volatile( "{\n" ".reg .pred p;\n" "setp.ne.b32 p, %133, 0;\n" "wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e5m2 " "{%0, %1, %2, %3, %4, %5, %6, %7, " " %8, %9, %10, %11, %12, %13, %14, %15, " " %16, %17, %18, %19, %20, %21, %22, %23, " " %24, %25, %26, %27, %28, %29, %30, %31, " " %32, %33, %34, %35, %36, %37, %38, %39, " " %40, %41, %42, %43, %44, %45, %46, %47, " " %48, %49, %50, %51, %52, %53, %54, %55, " " %56, %57, %58, %59, %60, %61, %62, %63, " " %64, %65, %66, %67, %68, %69, %70, %71, " " %72, %73, %74, %75, %76, %77, %78, %79, " " %80, %81, %82, %83, %84, %85, %86, %87, " " %88, %89, %90, %91, %92, %93, %94, %95, " " %96, %97, %98, %99, %100, %101, %102, %103, " " %104, %105, %106, %107, %108, %109, %110, %111, " " %112, %113, %114, %115, %116, %117, %118, %119, " " %120, %121, %122, %123, %124, %125, %126, %127}," "{%128, %129, %130, %131}," " %132," " p, %134, %135;\n" "}\n" : "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003), "+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007), "+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011), "+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015), "+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019), "+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023), "+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027), "+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031), "+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035), "+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039), "+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043), "+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047), "+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051), "+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055), "+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059), "+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063), "+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067), "+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071), "+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075), "+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079), "+f"(d080), 
"+f"(d081), "+f"(d082), "+f"(d083), "+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087), "+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091), "+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095), "+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099), "+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103), "+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107), "+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111), "+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115), "+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119), "+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123), "+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127) : "r"(a000), "r"(a001), "r"(a002), "r"(a003), "l"(desc_b), "r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB))); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cute
cutlass/include/cute/arch/mma_sm90_gmma.hpp
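// Usage sketch for the FP8 GMMA wrappers above: these structs are raw wgmma.mma_async
// wrappers and are normally reached through CuTe's MMA_Atom / TiledMMA layer rather than
// called directly. The sketch below is a minimal illustration under stated assumptions:
// sA and sB are shared-memory tensors with GMMA-compatible (swizzled) layouts, the kernel
// is compiled for sm90a, and a full warpgroup of 128 threads executes the call. The
// function name and the 64x128 accumulator tile are illustrative, not CUTLASS-provided.

#include <cute/tensor.hpp>
#include <cute/atom/mma_atom.hpp>

template <class SmemTensorA, class SmemTensorB>
CUTE_DEVICE void fp8_wgmma_usage_sketch(SmemTensorA const& sA, SmemTensorB const& sB)
{
  using namespace cute;

  // One 64x128x32 F32 += E5M2 * E5M2 atom, both operands sourced from shared memory (SS).
  auto tiled_mma = make_tiled_mma(SM90_64x128x32_F32E5M2E5M2_SS_TN<>{});
  auto thr_mma   = tiled_mma.get_thread_slice(threadIdx.x);

  // Descriptor-backed operand fragments and register-resident F32 accumulators.
  auto tCsA = thr_mma.partition_A(sA);
  auto tCsB = thr_mma.partition_B(sB);
  auto tCrA = thr_mma.make_fragment_A(tCsA);
  auto tCrB = thr_mma.make_fragment_B(tCsB);
  auto tCrC = partition_fragment_C(tiled_mma, Shape<_64, _128>{});

  // ScaleOut::Zero on the first issue clears the accumulators via the "p" predicate seen
  // in the PTX above; leaving it at ScaleOut::One on later issues accumulates instead.
  tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;

  warpgroup_arrive();                    // wgmma fence: make register operands visible
  gemm(tiled_mma, tCrA, tCrB, tCrC);     // expands to the wgmma.mma_async instructions above
  warpgroup_commit_batch();
  warpgroup_wait<0>();                   // wait for the committed wgmma batch to complete
}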
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Matrix multiply */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" #if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1)) #define CUTLASS_ARCH_MMA_SM70_SUPPORTED #endif #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)) #if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 &&__CUDACC_VER_MINOR__ >= 1)) #define CUTLASS_ARCH_MMA_SM70_ENABLED #endif #endif ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { ///////////////////////////////////////////////////////////////////////////////////////////////// // // Matrix multiply accumulate 884 - FP16 accumulation // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<8,8,4>, 8, half_t, layout::ColumnMajor, half_t, layout::ColumnMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::ColumnMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 4>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile("mma.sync.aligned.m8n8k4.col.col.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::ColumnMajor, half_t, layout::RowMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::ColumnMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::RowMajor; using FragmentB = Array<half_t, 4>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile("mma.sync.aligned.m8n8k4.col.row.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]) ); #else assert(0); #if 
defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::RowMajor, half_t, layout::ColumnMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 4>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile("mma.sync.aligned.m8n8k4.row.col.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::RowMajor, half_t, layout::RowMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::RowMajor; using FragmentB = Array<half_t, 4>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile("mma.sync.aligned.m8n8k4.row.row.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Matrix multiply accumulate 884 - FP32 accumulation // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::ColumnMajor, half_t, layout::ColumnMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::ColumnMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 8>; using Operator = OpMultiplyAdd; 
using ArchTag = arch::Sm70; /// Multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m8n8k4.col.col.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, " "{%12,%13,%14,%15,%16,%17,%18,%19};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]), "=f"(D[4]), "=f"(D[5]), "=f"(D[6]), "=f"(D[7]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "f"(C[4]), "f"(C[5]), "f"(C[6]), "f"(C[7]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::ColumnMajor, half_t, layout::RowMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::ColumnMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::RowMajor; using FragmentB = Array<half_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; /// Multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m8n8k4.col.row.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, " "{%12,%13,%14,%15,%16,%17,%18,%19};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]), "=f"(D[4]), "=f"(D[5]), "=f"(D[6]), "=f"(D[7]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "f"(C[4]), "f"(C[5]), "f"(C[6]), "f"(C[7]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::RowMajor, half_t, layout::ColumnMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; /// Multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m8n8k4.row.col.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, " "{%12,%13,%14,%15,%16,%17,%18,%19};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]), "=f"(D[4]), 
"=f"(D[5]), "=f"(D[6]), "=f"(D[7]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "f"(C[4]), "f"(C[5]), "f"(C[6]), "f"(C[7]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::RowMajor, half_t, layout::RowMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::RowMajor; using FragmentB = Array<half_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; /// Multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m8n8k4.row.row.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, " "{%12,%13,%14,%15,%16,%17,%18,%19};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]), "=f"(D[4]), "=f"(D[5]), "=f"(D[6]), "=f"(D[7]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "f"(C[4]), "f"(C[5]), "f"(C[6]), "f"(C[7]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation specialized for the entire warp template < typename LayoutA, typename LayoutB, typename ElementC, typename LayoutC, typename Operator > struct Mma< gemm::GemmShape<16, 16, 4>, 32, half_t, LayoutA, half_t, LayoutB, ElementC, LayoutC, Operator > : public Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, LayoutA, half_t, LayoutB, ElementC, LayoutC, Operator> { using Shape = gemm::GemmShape<16, 16, 4>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace arch } // namespace cutlass
cutlass/include/cutlass/arch/mma_sm70.h
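// Usage sketch for the SM70 Mma specializations above: each Mma<GemmShape<8,8,4>, 8, ...>
// is a thin functor over a single quad-pair mma.sync.aligned.m8n8k4 instruction. The call
// shape is shown below; it assumes the fragments are already distributed across the
// participating threads in the layout the PTX expects (operand shuffling is not shown),
// and the function name is illustrative rather than a CUTLASS-provided entry point.

#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma_sm70.h"

CUTLASS_DEVICE
void mma_sm70_usage_sketch(cutlass::Array<cutlass::half_t, 4> const& frag_A,
                           cutlass::Array<cutlass::half_t, 4> const& frag_B,
                           cutlass::Array<float, 8>&                 accum)
{
  using Mma = cutlass::arch::Mma<
      cutlass::gemm::GemmShape<8, 8, 4>, 8,           // 8x8x4 tile computed by 8 threads
      cutlass::half_t, cutlass::layout::RowMajor,     // A
      cutlass::half_t, cutlass::layout::ColumnMajor,  // B
      float,           cutlass::layout::RowMajor,     // C / D (FP32 accumulation)
      cutlass::arch::OpMultiplyAdd>;

  Mma mma;
  mma(accum, frag_A, frag_B, accum);  // D = A * B + C, accumulating in place
}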
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This file contains definitions and utility functions for describing convolution problem sizes. Conv2dProblem desciption: activation (NHWC), filter (KRSC), output (NPQK), pading (pad_h, pad_w), stride (stride_h, stride_w), dilation (dilation_h, dilation_w). 
Free functions to map: Map tensor extents (Conv2d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_extent(ConvolutionOperator) Map tensor sizes (Conv2d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_size(ConvolutionOperator) Map tensor problem sizes (Conv2d -> ImplicitGemm): implicit_gemm_problem_size(ConvolutionOperator) */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/tensor_coord.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm_enumerated_types.h" #include "cutlass/matrix_coord.h" #include "cutlass/conv/convolution.h" #include "cutlass/functional.h" namespace cutlass { namespace conv { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Problem size structure struct Conv2dProblemSize { // Conv2d strictly problem size parameters int N, H, W, C, P, Q, K, R, S; int pad_h, pad_w; int stride_h, stride_w; int dilation_h, dilation_w; Mode mode; // Conv2d implementation-related parameters int split_k_slices; int groups; // // Methods // public: CUTLASS_HOST_DEVICE Conv2dProblemSize(): N(0), H(0), W(0), C(0), P(0), Q(0), K(0), R(0), S(0), pad_h(0), pad_w(0), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1), mode(Mode::kConvolution), split_k_slices(1), groups(1) { } /// Constructor for default padding, stride, dilation, and split-K CUTLASS_HOST_DEVICE Conv2dProblemSize( int N, int H, int W, int C, int P, int Q, int K, int R, int S, Mode mode ): N(N), H(H), W(W), C(C), P(P), Q(Q), K(K), R(R), S(S), pad_h(R / 2), pad_w(S / 2), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1), mode(mode), split_k_slices(1), groups (1) { } /// Constructor CUTLASS_HOST_DEVICE Conv2dProblemSize( int N, int H, int W, int C, int K, int R, int S, int P, int Q, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, Mode mode, int split_k_slices = 1, int groups = 1 ): N(N), H(H), W(W), C(C), P(P), Q(Q), K(K), R(R), S(S), pad_h(pad_h), pad_w(pad_w), stride_h(stride_h), stride_w(stride_w), dilation_h(dilation_h), dilation_w(dilation_w), mode(mode), split_k_slices(split_k_slices), groups (groups) { } /// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord // set user-defined output size and sets P and Q (include all data members in ctor) CUTLASS_HOST_DEVICE Conv2dProblemSize( cutlass::Tensor4DCoord input_size, // NHWC cutlass::Tensor4DCoord filter_size, // KRSC cutlass::Tensor4DCoord padding, // pad_h, _, pad_w, _ cutlass::MatrixCoord stride, // stride_h, stride_w cutlass::MatrixCoord dilation, // dilation_h, dilation_w cutlass::Tensor4DCoord output_size, // NPQK cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()), P(output_size.h()), Q(output_size.w()), K(filter_size.n()), R(filter_size.h()), S(filter_size.w()), pad_h(padding[0]), pad_w(padding[2]), stride_h(stride.row()), stride_w(stride.column()), dilation_h(dilation.row()), dilation_w(dilation.column()), mode(mode), split_k_slices(split_k_slices), groups(groups) {} /// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord // computes output size and sets P and Q (skip output from ctor arguments) CUTLASS_HOST_DEVICE Conv2dProblemSize( cutlass::Tensor4DCoord input_size, // NHWC cutlass::Tensor4DCoord filter_size, // KRSC cutlass::Tensor4DCoord padding, // pad_h, upper_pad_h, pad_w, upper_pad_w cutlass::MatrixCoord stride, // stride_h, stride_w cutlass::MatrixCoord dilation, // 
dilation_h, dilation_w cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()), K(filter_size.n()), R(filter_size.h()), S(filter_size.w()), pad_h(padding[0]), pad_w(padding[2]), stride_h(stride.row()), stride_w(stride.column()), dilation_h(dilation.row()), dilation_w(dilation.column()), mode(mode), split_k_slices(split_k_slices), groups(groups) { // set output P and Q P = ((H + pad_h + padding[1] - R * dilation_h) / stride_h) + 1; Q = ((W + pad_w + padding[3] - S * dilation_w) / stride_w) + 1; } /// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord // set user-defined output size and sets P and Q (skip padding, striding, and dilation) CUTLASS_HOST_DEVICE Conv2dProblemSize( cutlass::Tensor4DCoord input_size, // NHWC cutlass::Tensor4DCoord filter_size, // KRSC cutlass::Tensor4DCoord output_size, // NPQK cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()), P(output_size.h()), Q(output_size.w()), K(filter_size.n()), R(filter_size.h()), S(filter_size.w()), pad_h(R / 2), pad_w(S / 2), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1), mode(mode), split_k_slices(split_k_slices), groups(groups) {} // Reset covolution mode in the problem CUTLASS_HOST_DEVICE Conv2dProblemSize reset_mode(cutlass::conv::Mode mode_) { Conv2dProblemSize tmp(*this); tmp.mode = mode_; return tmp; } // Reset covolution mode in the problem CUTLASS_HOST_DEVICE Conv2dProblemSize reset_split_k_slices(int split_k_slices_) { Conv2dProblemSize tmp(*this); tmp.split_k_slices = split_k_slices_; return tmp; } /// Equality operator (ignores mode and split_k_slice) CUTLASS_HOST_DEVICE bool operator==(Conv2dProblemSize const &conv) const { return ( (N == conv.N) && (H == conv.H) && (W == conv.W) && (C == conv.C) && (K == conv.K) && (R == conv.R) && (S == conv.S) && (P == conv.P) && (Q == conv.Q) && (pad_h == conv.pad_h) && (pad_w == conv.pad_w) && (stride_h == conv.stride_h) && (stride_w == conv.stride_w) && (dilation_h == conv.dilation_h) && (dilation_w == conv.dilation_w) ); } /// Inequality operator CUTLASS_HOST_DEVICE bool operator!=(Conv2dProblemSize const &rhs) const { return !(*this == rhs); } /// Returns activation extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord activation_extent() const { return cutlass::Tensor4DCoord ({N, H, W, C}); } /// Returns filter extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord filter_extent(bool is_deconv = false) const { return is_deconv ? 
cutlass::Tensor4DCoord ({C, R, S, K / groups}) : cutlass::Tensor4DCoord ({K, R, S, C / groups}); } /// Returns output extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord output_extent() const { return cutlass::Tensor4DCoord ({N, P, Q, K}); } /// Returns activation size in number of elements CUTLASS_HOST_DEVICE int64_t activation_size() const { return (N * H * W * C); } /// Returns filter size in number of elements CUTLASS_HOST_DEVICE int64_t filter_size() const { return (K * R * S * C / groups); } /// Returns output size in number of elements CUTLASS_HOST_DEVICE int64_t output_size() const { return (N * P * Q * K); } /// Returns padding as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord padding() const { return cutlass::Tensor4DCoord ({pad_h, pad_h, pad_w, pad_w}); } /// Returns stride as MatrixCoord CUTLASS_HOST_DEVICE cutlass::MatrixCoord stride() const { return cutlass::MatrixCoord ({stride_h, stride_w}); } /// Returns dilation as MatrixCoord CUTLASS_HOST_DEVICE cutlass::MatrixCoord dilation() const { return cutlass::MatrixCoord ({dilation_h, dilation_w}); } ///////////////////////////////////////////////////////////////// // Methods used for strided dgrad implementation ///////////////////////////////////////////////////////////////// /// Number of filter r positions to accumulate in gemm-k dim CUTLASS_HOST_DEVICE int num_gemm_k_filter_r(int r) const { return ((R - r + stride_h - 1) / stride_h); } /// Number of filter s positions to accumulate in gemm-k dim CUTLASS_HOST_DEVICE int num_gemm_k_filter_s(int s) const { return ((S - s + stride_w - 1) / stride_w); } /// Number of filter positions to accumulate in gemm-k dim CUTLASS_HOST_DEVICE int num_gemm_k_filter_positions(int r, int s) const { return num_gemm_k_filter_r(r) * num_gemm_k_filter_s(s); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // ImplicitGemm helper functions // //////////////////////////////////////////////////////////////////////////////////////////////////// /// Determine the problem size of the implicit GEMM operation CUTLASS_HOST_DEVICE cutlass::gemm::GemmCoord implicit_gemm_problem_size( Operator conv_operator, Conv2dProblemSize const &problem_size) { // Compute problem size switch (conv_operator) { case Operator::kFprop: return gemm::GemmCoord( problem_size.N * problem_size.P * problem_size.Q, problem_size.K, problem_size.R * problem_size.S * problem_size.C / problem_size.groups ); case Operator::kDeconv: case Operator::kDgrad: return gemm::GemmCoord( problem_size.N * problem_size.H * problem_size.W, problem_size.C, problem_size.R * problem_size.S * problem_size.K ); case Operator::kWgrad: return gemm::GemmCoord( problem_size.K, problem_size.R * problem_size.S * problem_size.C, problem_size.N * problem_size.P * problem_size.Q ); default: break; } return gemm::GemmCoord(); } // Determine the number of gemm_k iterations for conv2d problem using implicit gemm algorithm CUTLASS_HOST_DEVICE int implicit_gemm_k_iterations( Operator conv_operator, int threadblock_K, Conv2dProblemSize const &problem_size, IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic, GroupMode group_mode = GroupMode::kNone, int threadblock_N = 0) { int iterations = 0; if (group_mode == GroupMode::kNone) { if (algorithm == IteratorAlgorithm::kFixedChannels) { int positions_per_iteration = threadblock_K / problem_size.C; switch (conv_operator) { case Operator::kFprop: iterations = (problem_size.R * problem_size.S + positions_per_iteration - 1 ) / 
positions_per_iteration; break; default: break; } } else if (algorithm == IteratorAlgorithm::kFewChannels) { switch (conv_operator) { case Operator::kFprop: iterations = (problem_size.R * problem_size.S * problem_size.C + threadblock_K - 1 ) / threadblock_K; break; default: break; } } else { int elements_per_split_k_slice = 0; switch (conv_operator) { case Operator::kFprop: elements_per_split_k_slice = (problem_size.C + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K); break; case Operator::kDeconv: case Operator::kDgrad: elements_per_split_k_slice = (problem_size.K + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K); break; case Operator::kWgrad: elements_per_split_k_slice = (problem_size.N * problem_size.P * problem_size.Q + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = (elements_per_split_k_slice + threadblock_K - 1) / threadblock_K; break; default: break; } } } else if (group_mode == GroupMode::kDepthwise) { int channels_per_cta = threadblock_N; if (algorithm == IteratorAlgorithm::kAnalytic) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.R * problem_size.S * ((channels_per_cta + threadblock_K - 1) / threadblock_K); break; default: break; } } } else { // Group conv int channels_per_group = problem_size.C / problem_size.groups; int k_per_group = problem_size.K / problem_size.groups; if (algorithm == IteratorAlgorithm::kAnalytic) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.R * problem_size.S * ((channels_per_group + threadblock_K - 1) / threadblock_K); // In group conv, if k_per_group < threadblock_N, one Threadblock will calculate multiple groups if (problem_size.groups != 1) { if (k_per_group < threadblock_N) { iterations *= threadblock_N / k_per_group; } } break; default: break; } } else if (algorithm == IteratorAlgorithm::kOptimized) { // Current optimized iterator only support GroupMode::kSingleGroup if (group_mode == GroupMode::kSingleGroup) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.R * problem_size.S * ((channels_per_group + threadblock_K - 1) / threadblock_K); break; default: break; } } } } return iterations; } template <int N = 1, int Output_P = 1, int Output_Q = 1> CUTLASS_HOST_DEVICE int depthwise_gemm_k_iterations( Operator conv_operator, int threadblock_K, Conv2dProblemSize const &problem_size, IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic, GroupMode group_mode = GroupMode::kNone, int threadblock_N = 0) { int n = problem_size.N; int p = (problem_size.P + Output_P - 1) / Output_P; int q = (problem_size.Q + Output_Q - 1) / Output_Q; int iterations = (n * p * q + problem_size.split_k_slices - 1) / problem_size.split_k_slices; return iterations; } CUTLASS_HOST_DEVICE int implicit_gemm_k_iterations_per_channel( Operator conv_operator, Conv2dProblemSize const &problem_size, IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic) { int iterations = 0; //0 means not applicable if (algorithm == IteratorAlgorithm::kAnalytic || algorithm == IteratorAlgorithm::kOptimized) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.R * problem_size.S; break; case Operator::kDeconv: case Operator::kDgrad: iterations = problem_size.R * problem_size.S; break; default: break; } } 
return iterations; } //////////////////////////////////////////////////////////////////////////////// // Mapping function (ImplicitGemm A, B, C -> Conv Activation, Filter, Output) //////////////////////////////////////////////////////////////////////////////// /// Returns ImplicitGemm tensor A extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord implicit_gemm_tensor_a_extent( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.activation_extent(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.output_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.output_extent(); default : break; } return cutlass::Tensor4DCoord(); } /// Returns ImplicitGemm tensor B extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord implicit_gemm_tensor_b_extent( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.filter_extent(); case cutlass::conv::Operator::kDeconv: return problem_size.filter_extent(true); case cutlass::conv::Operator::kDgrad: return problem_size.filter_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.activation_extent(); default : break; } return cutlass::Tensor4DCoord(); } /// Returns ImplicitGemm tensor C extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord implicit_gemm_tensor_c_extent( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.output_extent(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.activation_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.filter_extent(); default : break; } return cutlass::Tensor4DCoord(); } /// Returns ImplicitGemm tensor A size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_a_size( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.activation_size(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.output_size(); case cutlass::conv::Operator::kWgrad: return problem_size.output_size(); default : break; } return 0; } /// Returns ImplicitGemm tensor B size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_b_size( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.filter_size(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.filter_size(); case cutlass::conv::Operator::kWgrad: return problem_size.activation_size(); default : break; } return 0; } /// Returns ImplicitGemm tensor C size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_c_size( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.output_size(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.activation_size(); case cutlass::conv::Operator::kWgrad: return problem_size.filter_size(); default : break; } return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// 
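// Illustrative sketch (not part of this header): the mapping functions above reduce a Conv2d
// problem to an equivalent GEMM. The Tensor4DCoord-based Conv2dProblemSize constructor used
// here (it derives P and Q from input, filter, padding, stride and dilation) is assumed from
// earlier in this header; the problem dimensions are hypothetical.

#include <iostream>
#include "cutlass/conv/conv2d_problem_size.h"

int main() {
  cutlass::conv::Conv2dProblemSize problem(
      {1, 224, 224, 64},                        // input  (N, H, W, C)
      {64, 7, 7, 64},                           // filter (K, R, S, C)
      {3, 3, 3, 3},                             // symmetric padding
      {2, 2},                                   // stride (stride_h, stride_w)
      {1, 1},                                   // dilation
      cutlass::conv::Mode::kCrossCorrelation);

  // Fprop maps to GEMM with M = N*P*Q, N = K, K = R*S*C / groups (see implicit_gemm_problem_size).
  cutlass::gemm::GemmCoord gemm = cutlass::conv::implicit_gemm_problem_size(
      cutlass::conv::Operator::kFprop, problem);
  std::cout << "GEMM MxNxK: " << gemm.m() << " x " << gemm.n() << " x " << gemm.k() << "\n";

  // For fprop, tensor A is the activation and tensor C is the output (see the functions above).
  cutlass::Tensor4DCoord a_extent =
      cutlass::conv::implicit_gemm_tensor_a_extent(cutlass::conv::Operator::kFprop, problem);
  cutlass::Tensor4DCoord c_extent =
      cutlass::conv::implicit_gemm_tensor_c_extent(cutlass::conv::Operator::kFprop, problem);
  std::cout << "A extent: " << a_extent.n() << "x" << a_extent.h() << "x" << a_extent.w() << "x" << a_extent.c()
            << "  C extent: " << c_extent.n() << "x" << c_extent.h() << "x" << c_extent.w() << "x" << c_extent.c() << "\n";

  // Mainloop k iterations for a threadblock tile whose GEMM-K extent is 32.
  int k_iters = cutlass::conv::implicit_gemm_k_iterations(
      cutlass::conv::Operator::kFprop, /*threadblock_K=*/32, problem);
  std::cout << "gemm_k_iterations: " << k_iters << "\n";
  return 0;
}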
//////////////////////////////////////////////////////////////////////////////////////////////////// // Strided dgrad helper functions // //////////////////////////////////////////////////////////////////////////////////////////////////// // Returns number of CTA tiles M to cover valid MMAs per starting filter position CUTLASS_HOST_DEVICE int strided_dgrad_tile_m_per_filter( Conv2dProblemSize const &problem_size, int tile_size_m) { // Compute NHW rows in Dx output that need MMA per starting filter position int rows_h_per_filter = (problem_size.H + problem_size.stride_h - 1) / problem_size.stride_h; int rows_w_per_filter = (problem_size.W + problem_size.stride_w - 1) / problem_size.stride_w; int rows_nhw_per_filter = problem_size.N * rows_h_per_filter * rows_w_per_filter; // Number of CTA tiles M to cover valid MMAs per starting filter position int tile_m_per_filter = (rows_nhw_per_filter + tile_size_m - 1) / tile_size_m; return tile_m_per_filter; } // Computes starting Dx coord (h, w) for given starting filter position CUTLASS_HOST_DEVICE void strided_dgrad_starting_coords( Conv2dProblemSize const &problem_size, FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod, int r, int s, int &start_h, int &start_w) { // function locals for remainder by fast divmod int pad_h_rem_, pad_w_rem_; // start_h = std::abs(problem_size.stride_h - ((problem_size.pad_h % problem_size.stride_h) - r)) % problem_size.stride_h; stride_h_divmod.divmod(pad_h_rem_, problem_size.pad_h); int r_ = absolute_value(problem_size.stride_h - (pad_h_rem_ - r)); stride_h_divmod.divmod(start_h, r_); // start_w = std::abs(problem_size.stride_w - ((problem_size.pad_w % problem_size.stride_w) - s)) % problem_size.stride_w; stride_w_divmod.divmod(pad_w_rem_, problem_size.pad_w); int s_ = absolute_value(problem_size.stride_w - (pad_w_rem_ - s)); stride_w_divmod.divmod(start_w, s_); } } // namespace conv } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
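// Illustrative sketch (not part of this header): for strided dgrad there are stride_h x stride_w
// starting filter positions, and the helpers above size the CTA grid per position and locate the
// first contributing output coordinate. The loop below reproduces the commented-out formulas from
// strided_dgrad_starting_coords with plain integers and hypothetical problem dimensions, instead
// of going through FastDivmod.

#include <cstdlib>
#include <iostream>

int main() {
  int N = 8, H = 14, W = 14;                     // hypothetical Dx extent
  int stride_h = 2, stride_w = 2, pad_h = 1, pad_w = 1;
  int tile_size_m = 64;                          // CTA tile M

  // Same arithmetic as strided_dgrad_tile_m_per_filter.
  int rows_h_per_filter = (H + stride_h - 1) / stride_h;
  int rows_w_per_filter = (W + stride_w - 1) / stride_w;
  int rows_nhw_per_filter = N * rows_h_per_filter * rows_w_per_filter;
  int tile_m_per_filter = (rows_nhw_per_filter + tile_size_m - 1) / tile_size_m;
  std::cout << "CTA tiles (M) per starting filter position: " << tile_m_per_filter << "\n";

  // Starting (h, w) for each filter position within the stride footprint.
  for (int r = 0; r < stride_h; ++r) {
    for (int s = 0; s < stride_w; ++s) {
      int start_h = std::abs(stride_h - ((pad_h % stride_h) - r)) % stride_h;
      int start_w = std::abs(stride_w - ((pad_w % stride_w) - s)) % stride_w;
      std::cout << "(r,s)=(" << r << "," << s << ") starts at (h,w)=("
                << start_h << "," << start_w << ")\n";
    }
  }
  return 0;
}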
cutlass/include/cutlass/conv/conv2d_problem_size.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Extracts the host-params objects into non-template code. */ #pragma once #define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0 #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/threadblock/conv2d_params.h" #include "cutlass/conv/conv3d_problem_size.h" #if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED #include <fstream> #endif ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Params structure used for all Conv3d analytic tile iterators template< typename Layout_ = layout::TensorNDHWC > struct Conv3dAnalyticParams { using Layout = Layout_; Layout layout; // // Methods // CUTLASS_HOST_DEVICE Conv3dAnalyticParams() { } CUTLASS_HOST_DEVICE Conv3dAnalyticParams( Conv3dProblemSize const &, // unused; placeholder to match other Params interfaces. 
Layout const &layout ): layout(layout) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure used for Conv3dFpropActivationTileIteratorOptimized template< typename Layout_ = layout::TensorNDHWC > struct Conv3dFpropActivationIteratorOptimizedParams; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure used for Conv3dFpropActivationTileIteratorOptimized template<> struct Conv3dFpropActivationIteratorOptimizedParams<layout::TensorNDHWC> { using Layout = layout::TensorNDHWC; Layout layout; int64_t inc_next[4]; // {next S, next R, next T, next C} int filter_c_delta; // number of logical elements to add to filter_c_ int ZPQ; // product of Z*P*Q int PQ; // product of P*Q FastDivmod zpq_divmod; FastDivmod pq_divmod; FastDivmod q_divmod; // // Methods // CUTLASS_HOST_DEVICE Conv3dFpropActivationIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dFpropActivationIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, ///< layout object int element_size_bits, ///< size of each element in bits MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout), PQ(problem_size.P * problem_size.Q), ZPQ(problem_size.Z * problem_size.P * problem_size.Q), zpq_divmod(ZPQ), pq_divmod(PQ), q_divmod(problem_size.Q) { TRACE_CONV_INITIALIZERS("conv3d_fprop", "activation", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1); // next S inc_next[0] = conv_sign * ( int64_t(layout.stride()[0]) * problem_size.dilation_w ) * element_size_bits / 8; // next R inc_next[1] = conv_sign * ( int64_t(layout.stride()[1]) * problem_size.dilation_h - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // next T inc_next[2] = conv_sign * ( int64_t(layout.stride()[2]) * problem_size.dilation_d - (problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // next C inc_next[3] = ( threadblock_shape.column() * problem_size.split_k_slices - conv_sign * int64_t(problem_size.T - 1) * layout.stride()[2] * problem_size.dilation_d - conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h - conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // logical offset added to internal channel counter - units are elements, not bytes filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template< typename Layout_ = layout::TensorNDHWC > struct Conv3dFpropFilterIteratorOptimizedParams; ///////////////////////////////////////////////////////////////////////////////////////////////// template<> struct Conv3dFpropFilterIteratorOptimizedParams<layout::TensorNDHWC> { using Layout = layout::TensorNDHWC; Layout layout; int TRS; int filter_c_delta; int64_t inc_next_k; // offset in units of bytes to next K position int64_t inc_next_trs; // offset in units of bytes to next TRS position int64_t inc_next_c; // offset in units of bytes to next C position // // Methods // CUTLASS_HOST_DEVICE 
Conv3dFpropFilterIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dFpropFilterIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, int element_size_bits, ///< size of each element in bits MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout) { TRACE_CONV_INITIALIZERS("conv3d_fprop", "filter", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); TRS = problem_size.T * problem_size.R * problem_size.S; inc_next_k = (int64_t(layout.stride()[3]) * threadmap_delta.strided() * element_size_bits) / 8; inc_next_trs = ( int64_t(layout.stride()[0]) - int64_t(layout.stride()[3]) * (threadmap_iterations.strided() - 1) * threadmap_delta.strided() ) * element_size_bits / 8; inc_next_c = ( threadblock_shape.row() * problem_size.split_k_slices - int64_t(TRS - 1) * layout.stride()[0] - int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[3] ) * element_size_bits / 8; filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters object for Conv3d DGRAD OutputGradient (dy) iterator struct Conv3dDgradOutputGradientIteratorOptimizedParams { using Layout = layout::TensorNDHWC; Layout layout; int64_t inc_next[4]; // {next S, next R, next T, next K} int filter_k_delta; // number of logical elements to add to filter_k_ FastDivmod dhw_divmod; FastDivmod hw_divmod; FastDivmod w_divmod; // // Methods // CUTLASS_HOST_DEVICE Conv3dDgradOutputGradientIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dDgradOutputGradientIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, ///< layout object int element_size_bits, ///< size of each element in bits MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout), dhw_divmod(problem_size.D * problem_size.H * problem_size.W), hw_divmod(problem_size.H * problem_size.W), w_divmod(problem_size.W) { TRACE_CONV_INITIALIZERS("conv3d_dgrad", "output_gradient", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); int conv_sign = (problem_size.mode == Mode::kConvolution ? 
1 : -1); // next S inc_next[0] = conv_sign * ( int64_t(layout.stride()[0]) * problem_size.dilation_w ) * element_size_bits / 8; // next R inc_next[1] = conv_sign * ( int64_t(layout.stride()[1]) * problem_size.dilation_h - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // next T inc_next[2] = conv_sign * ( int64_t(layout.stride()[2]) * problem_size.dilation_d - (problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // next K inc_next[3] = ( threadblock_shape.column() * problem_size.split_k_slices - conv_sign * int64_t(problem_size.T - 1) * layout.stride()[2] * problem_size.dilation_d - conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h - conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // logical offset added to internal channel counter - units are elements, not bytes filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters object for Conv2d DGRAD Filter (w) iterator struct Conv3dDgradFilterIteratorOptimizedParams { using Layout = layout::TensorNDHWC; Layout layout; int TRS; int filter_k_delta; int64_t inc_next_strided; // offset in units of bytes to next K coordinate within tile int64_t inc_next_trs; // offset in units of bytes to next TRS position int64_t inc_next_k; // offset in units of bytes to next K position in subsequent tile // // Methods // CUTLASS_HOST_DEVICE Conv3dDgradFilterIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dDgradFilterIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, int element_size_bits, ///< size of each element in bits MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout), TRS(problem_size.T * problem_size.R * problem_size.S) { TRACE_CONV_INITIALIZERS("conv3d_dgrad", "filter", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); inc_next_strided = ((int64_t)layout.stride()[3] * threadmap_delta.strided() * element_size_bits) / 8; inc_next_trs = ( (int64_t)layout.stride()[0] - (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[3] ) * element_size_bits / 8; inc_next_k = ( threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[3] - (problem_size.T * problem_size.R * problem_size.S - 1) * (int64_t)layout.stride()[0] - (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[3] ) * element_size_bits / 8; filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices; } }; /// Parameters object for Conv3d WGRAD OutputGradient iterator struct Conv3dWgradOutputGradientIteratorOptimizedParams { using Layout = layout::TensorNDHWC; using LongIndex = typename Layout::LongIndex; Layout layout; int NZPQ; // precomputd product of N*Z*P*Q for clearing predicates int ZPQ; // product of Z*P*Q unsigned zpq_mul; // precomputed quantities for fast computation of div/% by ZPQ unsigned zpq_shr; // in device code. int PQ; // product of P*Q unsigned pq_mul; // precomputed quantities for fast computation of div/% by PQ unsigned pq_shr; // in device code. 
unsigned q_mul; // precomputed quantities for fast computation of div/% by Q unsigned q_shr; // in device code. LongIndex offset_next_strided; // offset in units of bytes to next nzpq coordinate within tile LongIndex offset_next_contiguous; // offset in units of bytes to next k coordinate within tile LongIndex inc_next_nzpq; // offset in units of bytes to next nzpq position in subsequent tile // // Methods // CUTLASS_HOST_DEVICE Conv3dWgradOutputGradientIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dWgradOutputGradientIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, int element_size_bits, MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout) { TRACE_CONV_INITIALIZERS("conv3d_wgrad", "output_gradient", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); // Incremental offsets in unites of bytes (number of elements) * element_size_bits / 8 offset_next_strided = (threadmap_delta.strided() * (int64_t)layout.stride()[0]) * element_size_bits / 8; offset_next_contiguous = (threadmap_delta.contiguous()) * element_size_bits / 8; inc_next_nzpq = (threadblock_shape.column() * problem_size.split_k_slices * (int64_t)layout.stride()[0]) * element_size_bits / 8; // Precompute several quantities for fast modulo arithmetic. NZPQ = problem_size.N * problem_size.Z * problem_size.P * problem_size.Q; ZPQ = problem_size.Z * problem_size.P * problem_size.Q; find_divisor(zpq_mul, zpq_shr, ZPQ); PQ = problem_size.P * problem_size.Q; find_divisor(pq_mul, pq_shr, PQ); find_divisor(q_mul, q_shr, problem_size.Q); } }; /// Parameters object for Conv3d WGRAD Activation Tile Access Iterator struct Conv3dWgradActivationIteratorOptimizedParams { using Layout = layout::TensorNDHWC; Layout layout; int RSC; // product of R*S*C unsigned rsc_mul; // precomputed quantities for fast computation of div/% by RSC unsigned rsc_shr; // in device code. int SC; // product of S*C unsigned sc_mul; // precomputed quantities for fast computation of div/% by SC unsigned sc_shr; // in device code. unsigned c_mul; // precomputed quantities for fast computation of div/% by C unsigned c_shr; // in device code. int ZPQ; // product of Z*P*Q unsigned zpq_mul; // precomputed quantities for fast computation of div/% by ZPQ unsigned zpq_shr; // in device code. int PQ; // product of P*Q unsigned pq_mul; // precomputed quantities for fast computation of div/% by PQ unsigned pq_shr; // in device code. unsigned q_mul; // precomputed quantities for fast computation of div/% by Q unsigned q_shr; // in device code. // // Methods // CUTLASS_HOST_DEVICE Conv3dWgradActivationIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dWgradActivationIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, int element_size_bits, MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout) { TRACE_CONV_INITIALIZERS("conv3d_wgrad", "activation", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); // Precompute several quantities for fast modulo arithmetic. 
RSC = problem_size.R * problem_size.S * problem_size.C; find_divisor(rsc_mul, rsc_shr, RSC); SC = problem_size.S * problem_size.C; find_divisor(sc_mul, sc_shr, SC); find_divisor(c_mul, c_shr, problem_size.C); ZPQ = problem_size.Z * problem_size.P * problem_size.Q; find_divisor(zpq_mul, zpq_shr, ZPQ); PQ = problem_size.P * problem_size.Q; find_divisor(pq_mul, pq_shr, PQ); find_divisor(q_mul, q_shr, problem_size.Q); } }; } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
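// Illustrative sketch (not part of this header): the wgrad parameter structures above replace
// integer division in the device inner loop with the multiply-and-shift pairs produced by
// find_divisor. The round trip below decomposes a linear nzpq index back into (n, z, p, q) the
// way the optimized iterators do; it assumes the find_divisor/fast_divmod helpers declared in
// cutlass/fast_math.h (already included by this header) with their usual signatures, and uses
// hypothetical extents.

#include <iostream>
#include "cutlass/fast_math.h"

int main() {
  int Z = 4, P = 8, Q = 8;                        // hypothetical output-gradient extents
  int ZPQ = Z * P * Q;
  int PQ  = P * Q;

  // Precompute magic multiplier/shift pairs once, as the Params constructors above do.
  unsigned zpq_mul, zpq_shr, pq_mul, pq_shr, q_mul, q_shr;
  cutlass::find_divisor(zpq_mul, zpq_shr, ZPQ);
  cutlass::find_divisor(pq_mul,  pq_shr,  PQ);
  cutlass::find_divisor(q_mul,   q_shr,   Q);

  // Decompose a linear nzpq coordinate without any integer division instructions.
  int nzpq = 1 * ZPQ + 2 * PQ + 5 * Q + 7;        // expect (n, z, p, q) = (1, 2, 5, 7)
  int n, z, p, q, rem_zpq, rem_pq;
  cutlass::fast_divmod(n, rem_zpq, nzpq,    ZPQ, zpq_mul, zpq_shr);
  cutlass::fast_divmod(z, rem_pq,  rem_zpq, PQ,  pq_mul,  pq_shr);
  cutlass::fast_divmod(p, q,       rem_pq,  Q,   q_mul,   q_shr);
  std::cout << "(n,z,p,q) = (" << n << "," << z << "," << p << "," << q << ")\n";
  return 0;
}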
cutlass/include/cutlass/conv/threadblock/conv3d_params.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped fused activation's scale+bias+relu and Implicit GEMM Convolution kernel. The original implicit gemm will store out-of-bound data as zeroes in the shared memory because zeros into the tensor core, zeroes out of the tensor cores. The result is remained the same. When fusing scale+bias+relu into the mainloop, it is no longer true because 0 x scale + bias = bias which is no longer always 0. So, instead of storing zeroes, this fused kernel stores the out-of-bound data as a special NaN (0x7eff), when applying scale+bias+relu, the code is like if (data == 0x7eff) data = 0; else data = scale+bias+relu(data, scale, bias); The biggest difference compared with the fused Fprop and scale+bias+relu is that scale and bias are loop invariant in Wgrad so that they only needs to be loaded once before the mainloop. See include/cutlass/conv/warp/scale_bias_relu_transformation.h for the elementwise computation. See include/cutlass/arch/memory_sm80.h for nan fill. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/cache_operation.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/scale_bias_tile_iterator.h" #include "cutlass/conv/warp/scale_bias_relu_transform.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Element type of scale and bias vectors typename ElementScaleBias_, /// Layout of scale and bias vectors typename LayoutScaleBias_, /// Element type of scale and bias vectors /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class MmaWgradFusionBase { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Element type of scale and bias vectors using ElementScaleBias = ElementScaleBias_; /// Layout of scale and bias vectors using LayoutScaleBias = LayoutScaleBias_; ///< Policy describing tuning details using Policy = Policy_; // // Dependent types // /// Warp-level Mma using Operator = typename Policy::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. using WarpGemm = typename Policy::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>; /// Number of warp-level GEMM oeprations static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); /// Number of stages static int const kStages = Stages; /// Tensor reference to the A operand using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>; /// Tensor reference to the B operand using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>; static_assert(kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); static_assert((kWarpGemmIterations % 2) == 0, "Inner loop iteration must be an even number."); // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM class SharedStorage { public: // // Type definitions // /// Shape of the A matrix operand in shared memory using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow, Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; public: // // Data members // /// Buffer for A operand AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A; /// Buffer for B operand AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B; public: // // Methods // /// Returns a layout object for the A matrix CUTLASS_DEVICE static typename Operator::LayoutA LayoutA() { return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); } /// Returns a layout object for the B matrix CUTLASS_HOST_DEVICE static typename Operator::LayoutB LayoutB() { return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); } /// Returns a TensorRef to the A operand CUTLASS_HOST_DEVICE TensorRefA operand_A_ref() { return TensorRefA{operand_A.data(), LayoutA()}; } /// Returns a TensorRef to the B operand CUTLASS_HOST_DEVICE TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; } }; protected: // // Data members // /// Iterator to load a warp-scoped tile of A operand from shared memory typename Operator::IteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of B operand from shared memory typename Operator::IteratorB warp_tile_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE 
MmaWgradFusionBase( ///< Shared storage needed for internal use by threadblock-scoped GEMM SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Iterates over vectors of scale and bias vector in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorScaleBias_, /// Iterates over vectors of scale and bias vector i /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class ImplicitGemmWgradFusionMultistage : public MmaWgradFusionBase<Shape_, typename IteratorScaleBias_::Element, typename IteratorScaleBias_::Layout, Policy_, Stages> { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Iterates over tiles of the scale and bias vectors in global memory using IteratorScaleBias = IteratorScaleBias_; ///< Policy describing tuning details using Policy = Policy_; ///< Base class using Base = MmaWgradFusionBase<Shape_, typename IteratorScaleBias::Element, typename IteratorScaleBias::Layout, Policy_, Stages>; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // // Dependent types // /// Fragment of accumulator tile using ElementC = typename Policy::Operator::ElementC; using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Internal structure exposed for introspection. 
struct Detail { /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; static int const kBBufferSize = ((sizeof(typename Operator::ElementC) == 4) && ((platform::is_same<typename Operator::Policy::Operator::ElementA, typename Operator::ElementA>::value && platform::is_same<typename Operator::Policy::Operator::ElementB, typename Operator::ElementB>::value)) && (Operator::Shape::kM >= 64 && Operator::Shape::kN >= 64)) ? 1 : 2; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpLoadedFragmentScaleBias = typename IteratorScaleBias::Fragment; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; int warp_idx_m_; int warp_idx_n_; public: /// Construct from tensor references CUTLASS_DEVICE ImplicitGemmWgradFusionMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM; warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_}); } CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr 
= reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr, iterator_A.get(), iterator_A.valid()); ++iterator_A; ++this->smem_iterator_A_; } } iterator_B.set_iteration_index(group_start_B); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / 8; // Uses nan fill for out of bound data cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpB>( dst_ptr, iterator_B.get(), iterator_B.valid()); ++iterator_B; ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< iterator over scale and bias vectors in global memory IteratorScaleBias iterator_B_scale_bias, ///< initial value of accumulator FragmentC const &src_accum, ///< number of iterations per channel int gemm_k_iterations_per_channel = 0, ///< Imaginary strides used for planar-complex only - ignored here int64_t imag_stride_A = 0, int64_t imag_stride_B = 0) { // // Prologue // WarpLoadedFragmentScaleBias warp_loaded_frag_B_scale_bias; iterator_B_scale_bias.add_tile_offset({0, warp_idx_n_}); iterator_B_scale_bias.load(warp_loaded_frag_B_scale_bias); // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr, iterator_A.get(), iterator_A.valid()); ++iterator_A; ++this->smem_iterator_A_; } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / 8; // Uses Nan fill for out of bound data cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpB>( dst_ptr, iterator_B.get(), iterator_B.valid()); ++iterator_B; ++this->smem_iterator_B_; } // Move to the next stage iterator_A.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. 
cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[Detail::kBBufferSize]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpTransformedFragmentA warp_transformed_frag_A[Detail::kBBufferSize]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; cutlass::conv::warp::WgradScaleBiasReluTransform<WarpTransformedFragmentB, WarpLoadedFragmentScaleBias> elementwise_transform; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance(iterator_A, iterator_B); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); elementwise_transform(warp_transformed_frag_B[0], warp_loaded_frag_B_scale_bias); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
if (Detail::kBBufferSize == 2) { this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize]); ++this->warp_tile_iterator_A_; } this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) { warp_mma.transform(warp_transformed_frag_A[warp_mma_k % Detail::kBBufferSize], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % Detail::kBBufferSize], warp_loaded_frag_B[warp_mma_k % 2]); elementwise_transform(warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_B_scale_bias); } warp_mma( accum, warp_transformed_frag_A[warp_mma_k % Detail::kBBufferSize], warp_transformed_frag_B[warp_mma_k % 2], accum ); if (Detail::kBBufferSize == 1) { this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); ++this->warp_tile_iterator_A_; } if (warp_mma_k + 1 == Base::kWarpGemmIterations) { warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); elementwise_transform( warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_B_scale_bias); } // Issue global->shared copies for the next stage int group_start_iteration_A, group_start_iteration_B; if (warp_mma_k + 1 == Base::kWarpGemmIterations) { group_start_iteration_A = 0; group_start_iteration_B = 0; } else { group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; } copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B); if (warp_mma_k + 2 == Base::kWarpGemmIterations) { // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; } } } // Insert fence and wait for all outstanding cp.async operations to commit. cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
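// Illustrative sketch (not part of this header): cp_async_nan fills out-of-bound B elements with
// the 0x7eff sentinel so that the fused transform can suppress them instead of letting
// 0 * scale + bias = bias leak into the accumulation. The scalar function below only models that
// per-element rule; the real WgradScaleBiasReluTransform in
// cutlass/conv/warp/scale_bias_relu_transform.h operates on whole warp fragments. The half_t
// raw()/bitcast() accessors used here are assumed from cutlass/numeric_types.h.

#include <cstdint>
#include <iostream>
#include "cutlass/numeric_types.h"

constexpr uint16_t kOobNanFill = 0x7eff;   // sentinel described in the file comment above

cutlass::half_t scale_bias_relu(cutlass::half_t x, cutlass::half_t scale, cutlass::half_t bias) {
  if (x.raw() == kOobNanFill) {
    return cutlass::half_t(0.0f);          // out-of-bound element must contribute exactly zero
  }
  cutlass::half_t y = scale * x + bias;    // affine transform
  return (y < cutlass::half_t(0.0f)) ? cutlass::half_t(0.0f) : y;   // ReLU
}

int main() {
  cutlass::half_t scale(0.5f), bias(1.0f);
  std::cout << float(scale_bias_relu(cutlass::half_t(2.0f), scale, bias)) << "\n";                 // 2
  std::cout << float(scale_bias_relu(cutlass::half_t::bitcast(kOobNanFill), scale, bias)) << "\n"; // 0
  return 0;
}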
cutlass/include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Visitor tree load operations for the sm90 TMA warp-specialized (ws) epilogue */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/arch/barrier.h" #include "cute/tensor.hpp" #include "sm90_visitor_tma_warpspecialized.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::fusion { using namespace cute; using namespace detail; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // // Elementwise Fetch Operations // ///////////////////////////////////////////////////////////////////////////////////////////////// // returns accumulator struct Sm90AccFetch : Sm90VisitorImpl<> { using Sm90VisitorImpl<>::Sm90VisitorImpl; struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { template <typename ElementAccumulator, int FragmentSize> CUTLASS_DEVICE Array<ElementAccumulator, FragmentSize> visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) { return frg_acc; } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... 
Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { return ConsumerStoreCallbacks{}; } }; // Split tree visitor fetches intermediate results from temporary accumulators using Sm90SplitTreeFetch = Sm90AccFetch; ///////////////////////////////////////////////////////////////////////////////////////////////// // returns C template <class Element> struct Sm90SrcFetch : Sm90VisitorImpl<> { CUTLASS_DEVICE bool is_producer_load_needed() const { return is_C_load_needed(); } CUTLASS_DEVICE bool is_C_load_needed() const { return not is_void_v<Element>; } CUTLASS_DEVICE bool is_zero() const { return is_void_v<Element>; } using Sm90VisitorImpl<>::Sm90VisitorImpl; template<class SrcTensor> struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks(SrcTensor const& tCrC) : tCrC(tCrC) {} SrcTensor const& tCrC; // (CPY,CPY_M,CPY_N) template <typename ElementAccumulator, int FragmentSize> CUTLASS_DEVICE Array<typename SrcTensor::value_type, FragmentSize> visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) { return recast<Array<typename SrcTensor::value_type, FragmentSize>>(tCrC)(epi_v); } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { // register type may differ from logical type so we can't assert matching types here return ConsumerStoreCallbacks(args.tCrC); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Elementwise Load Operations // ///////////////////////////////////////////////////////////////////////////////////////////////// template < int Stages, class EpilogueTile, class Element, class StrideMNL, class SmemLayoutAtom, class CopyOpS2R, int Alignment = 128 / sizeof_bits_v<Element>, bool EnableNullptr = true // Fallback scalar broadcast for nullptr params > struct Sm90AuxLoad { static_assert(Alignment * sizeof_bits_v<Element> % 128 == 0, "sub-16B alignment not supported yet"); constexpr static bool is_m_major = epilogue::collective::detail::is_m_major<StrideMNL>(); // Find the max contiguous layout usable by TMA (if EpilogueTile is a non-compact tiler) using SmemShapeTma = decltype(make_shape( max_common_vector(make_layout(get<0>(EpilogueTile{})),make_layout(get<0>(EpilogueTile{}))), max_common_vector(make_layout(get<1>(EpilogueTile{})),make_layout(get<1>(EpilogueTile{}))))); using SmemLayoutTma = decltype(tile_to_shape( SmemLayoutAtom{}, SmemShapeTma{}, cute::conditional_t<is_m_major, Step<_2,_1>, Step<_1,_2>>{} )); using SmemLayout = decltype(tile_to_shape( SmemLayoutTma{}, make_shape(size<0>(shape(EpilogueTile{})), size<1>(shape(EpilogueTile{})), Int<Stages>{}), cute::conditional_t<is_m_major, Step<_2,_1,_3>, Step<_1,_2,_3>>{} )); using CopyOpG2S = SM90_TMA_LOAD ; struct SharedStorage { alignas(cutlass::detail::alignment_for_swizzle(SmemLayout{})) array_aligned<Element, size(SmemLayout{})> smem_aux; }; struct Arguments { Element const* ptr_aux = nullptr; Element null_default = Element(0); StrideMNL dAux = {}; }; struct Params { using TMA_Aux = decltype(make_tma_copy( CopyOpG2S{}, make_tensor(make_gmem_ptr(static_cast<Element const*>(nullptr)), repeat_like(StrideMNL{}, int32_t(0)), append<3>(StrideMNL{}, _0{})), take<0,2>(SmemLayoutTma{}))); TMA_Aux tma_load_aux; Element null_default = Element(0); bool use_default = false; }; template <class 
ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) auto problem_shape_mnkl = append<4>(problem_shape, 1); auto [M, N, K, L] = problem_shape_mnkl; auto M_AUX = size(M) ; Tensor tensor_aux = make_tensor(make_gmem_ptr(args.ptr_aux), make_layout(make_shape(M_AUX,N,L), append<3>(args.dAux, _0{}))); typename Params::TMA_Aux tma_load_aux = make_tma_copy(CopyOpG2S{}, tensor_aux, take<0,2>(SmemLayoutTma{})); bool use_default = false; if constexpr (EnableNullptr) { use_default = args.ptr_aux == nullptr; } return Params{tma_load_aux, args.null_default, use_default}; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { return cutlass::Status::kSuccess; } CUTLASS_HOST_DEVICE Sm90AuxLoad() { } CUTLASS_HOST_DEVICE Sm90AuxLoad(Params const& params, SharedStorage const& shared_storage) : params_ptr(&params), smem_aux(const_cast<Element*>(shared_storage.smem_aux.data())) { } Params const* params_ptr; Element* smem_aux; CUTLASS_DEVICE bool is_producer_load_needed() const { return true; } CUTLASS_DEVICE bool is_C_load_needed() const { return false; } CUTLASS_DEVICE bool is_zero() const { return (params_ptr->use_default && params_ptr->null_default == Element(0)); } template <class GTensor, class STensor> struct ProducerLoadCallbacks : EmptyProducerLoadCallbacks { CUTLASS_DEVICE ProducerLoadCallbacks(GTensor&& bGS_gAux, STensor&& bGS_sAux, Params const* params_ptr) : bGS_gAux(cute::forward<GTensor>(bGS_gAux)), bGS_sAux(cute::forward<STensor>(bGS_sAux)), params_ptr(params_ptr) {} GTensor bGS_gAux; // (TMA,TMA_M,TMA_N,EPI_M,EPI_N) STensor bGS_sAux; // (TMA,TMA_M,TMA_N,PIPE) Params const* params_ptr; CUTLASS_DEVICE void step(uint64_t* full_mbarrier_ptr, int epi_m, int epi_n, int load_iteration, bool issue_tma_load) { if constexpr (EnableNullptr) { if (params_ptr->use_default) { return; } } if (issue_tma_load) { // Increment the expected transaction bytes of the current stage's mbarrier by the subtile's byte-size constexpr uint32_t copy_bytes = size(take<0,2>(SmemLayout{})) * sizeof_bits_v<Element> / 8; cutlass::arch::ClusterTransactionBarrier::expect_transaction(full_mbarrier_ptr, copy_bytes); // Issue the TMA load constexpr uint16_t mcast_mask = 0; int load_pipe_index = load_iteration % Stages; copy(params_ptr->tma_load_aux.with(*full_mbarrier_ptr, mcast_mask), bGS_gAux(_,_,_,epi_m,epi_n), bGS_sAux(_,_,_,load_pipe_index)); } } }; template <class... 
Args> CUTLASS_DEVICE auto get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) { auto [M, N, K, L] = args.problem_shape_mnkl; auto [m, n, k, l] = args.tile_coord_mnkl; auto coord_shape = make_coord(m, n, l) ; Tensor mAux_mn = params_ptr->tma_load_aux.get_tma_tensor(make_shape(M,N,L)); // (M,N,L) Tensor mAux = coalesce(mAux_mn, take<0,2>(args.tile_shape_mnk)); Tensor gAux = local_tile(mAux, take<0,2>(args.tile_shape_mnk), coord_shape); // (CTA_M,CTA_N) Tensor gAux_epi = flat_divide(gAux, args.epi_tile); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N) Tensor sAux_epi = make_tensor(make_smem_ptr(smem_aux), SmemLayout{}); // (EPI_TILE_M,EPI_TILE_N,PIPE) ThrCopy thrblk_g2s = params_ptr->tma_load_aux.get_slice(_0{}); Tensor bGS_gAux = thrblk_g2s.partition_S(gAux_epi); // (TMA,TMA_M,TMA_N,EPI_M,EPI_N) Tensor bGS_sAux = thrblk_g2s.partition_D(sAux_epi); // (TMA,TMA_M,TMA_N,PIPE) return ProducerLoadCallbacks<decltype(bGS_gAux), decltype(bGS_sAux)>( cute::move(bGS_gAux), cute::move(bGS_sAux), params_ptr); } template <class RTensor, class TiledS2R, class STensorS2R> struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks(RTensor&& tC_rAux, TiledS2R tiled_s2r, STensorS2R&& tSR_sAux, Params const* params_ptr) : tC_rAux(cute::forward<RTensor>(tC_rAux)), tiled_s2r(tiled_s2r), tSR_sAux(cute::forward<STensorS2R>(tSR_sAux)), params_ptr(params_ptr) { } TiledS2R tiled_s2r; RTensor tC_rAux; // (CPY,CPY_M,CPY_N) STensorS2R tSR_sAux; // (S2R,S2R_M,S2R_N,PIPE) Params const* params_ptr; CUTLASS_DEVICE void previsit(int epi_m, int epi_n, int load_iteration, bool is_producer_load_needed) { if constexpr (EnableNullptr) { if (params_ptr->use_default) { fill(tC_rAux, params_ptr->null_default); return; } } using RLayoutS2R = decltype(cute::layout(TiledS2R{}.get_slice(0).retile_S(RTensor{}))); Tensor tSR_rAux = make_tensor(tC_rAux.data(), RLayoutS2R{}); // (S2R,S2R_M,S2R_N) int load_pipe_index = load_iteration % Stages; copy(tiled_s2r, tSR_sAux(_,_,_,load_pipe_index), tSR_rAux); } template <typename ElementAccumulator, int FragmentSize> CUTLASS_DEVICE Array<Element, FragmentSize> visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) { Tensor tC_rAux_frg = recast<Array<Element, FragmentSize>>(coalesce(tC_rAux)); // (EPI_V) return tC_rAux_frg(epi_v); } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... 
Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { auto [M, N, K, L] = args.problem_shape_mnkl; Tensor mAux_mn = params_ptr->tma_load_aux.get_tma_tensor(make_shape(M,N,L)); // (M,N,L) Tensor mAux = coalesce(mAux_mn, take<0,2>(args.tile_shape_mnk)); Tensor tC_gAux = sm90_partition_for_epilogue<ReferenceSrc // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) >(mAux, args.tile_shape_mnk, args.tile_coord_mnkl, args.epi_tile, args.tiled_copy, args.thread_idx); Tensor tC_rAux = make_tensor<Element>(take<0,3>(shape(tC_gAux))); // (CPY,CPY_M,CPY_N) auto tiled_s2r = conditional_return<ReferenceSrc>( make_tiled_copy_S(Copy_Atom<CopyOpS2R,Element>{}, args.tiled_copy), make_tiled_copy_D(Copy_Atom<CopyOpS2R,Element>{}, args.tiled_copy) ); Tensor sAux_epi = cute::as_position_independent_swizzle_tensor( make_tensor(make_smem_ptr(smem_aux), SmemLayout{})); // (EPI_TILE_M,EPI_TILE_N,PIPE) auto tSR_sAux = tiled_s2r.get_slice(args.thread_idx).partition_S(sAux_epi); // (S2R,S2R_M,S2R_N,PIPE) return ConsumerStoreCallbacks<decltype(tC_rAux), decltype(tiled_s2r), decltype(tSR_sAux)>( cute::move(tC_rAux), tiled_s2r, cute::move(tSR_sAux), params_ptr); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Broadcast Load Operations // ///////////////////////////////////////////////////////////////////////////////////////////////// // Scalar broadcast // Supports reduction over multiple broadcasts to support fusions such as fp8 scaling factors template< class Element, class StrideMNL = Stride<_0,_0,_0>, int BroadcastCount = 1, template <class> class ReductionFn = multiplies > struct Sm90ScalarBroadcast { static_assert( (cute::is_same_v<StrideMNL, Stride<_0,_0, _0>>) || // scalar broadcast, e.g. alpha (cute::is_same_v<StrideMNL, Stride<_0,_0, _1>>) || // batched scalar broadcast, e.g. per-batch alpha (cute::is_same_v<StrideMNL, Stride<_0,_0,int>>)); struct SharedStorage { }; struct Arguments { Element scalars[BroadcastCount] = {}; Element const* scalar_ptrs[BroadcastCount] = {}; StrideMNL dScalar = {}; }; using Params = Arguments; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter *cuda_adapter = nullptr) { return cutlass::Status::kSuccess; } CUTLASS_DEVICE bool is_producer_load_needed() const { return false; } CUTLASS_DEVICE bool is_C_load_needed() const { return false; } // This must be called after update_scalar is called CUTLASS_DEVICE bool is_zero() const { return scalar == Element(0); } CUTLASS_HOST_DEVICE Sm90ScalarBroadcast() { } CUTLASS_HOST_DEVICE Sm90ScalarBroadcast(Params const& params, SharedStorage const& shared_storage) : params_ptr(&params) { // Get the scalar for non-batched broadcast if (get<2>(params_ptr->dScalar) == 0) { update_scalar(); } } Element scalar; Params const* params_ptr; template <class... 
Args> CUTLASS_DEVICE auto get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) { // Get the scalar for batched broadcast if (get<2>(params_ptr->dScalar) != 0) { auto [m_coord, n_coord, k_coord, l_coord] = args.tile_coord_mnkl; update_scalar(l_coord); } return EmptyProducerLoadCallbacks{}; } struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks(Element scalar) : scalar(scalar) {} Element scalar; template <typename ElementAccumulator, int FragmentSize> CUTLASS_DEVICE Array<Element, FragmentSize> visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) { Array<Element, FragmentSize> frg_scalar; frg_scalar.fill(scalar); return frg_scalar; } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { // Get the scalar for batched broadcast if (get<2>(params_ptr->dScalar) != 0) { auto [m_coord, n_coord, k_coord, l_coord] = args.tile_coord_mnkl; update_scalar(l_coord); } return ConsumerStoreCallbacks(scalar); } private: CUTLASS_DEVICE void update_scalar(int l_coord = 0) { int l_offset = l_coord * size<2>(params_ptr->dScalar); if (params_ptr->scalar_ptrs[0] != nullptr) { scalar = params_ptr->scalar_ptrs[0][l_offset]; } else { // batch stride is ignored for nullptr fallback scalar = params_ptr->scalars[0]; } // Do reduction over multiple broadcasts if necessary ReductionFn<Element> reduction_fn; CUTLASS_PRAGMA_UNROLL for (int i = 1; i < BroadcastCount; ++i) { if (params_ptr->scalar_ptrs[i] != nullptr) { scalar = reduction_fn(scalar, params_ptr->scalar_ptrs[i][l_offset]); } else { // batch stride is ignored for nullptr fallback scalar = reduction_fn(scalar, params_ptr->scalars[i]); } } } template<class... Xs> CUTLASS_DEVICE void update_scalar(cute::tuple<Xs...>) { // Only support multiple L-modes with fully-broadcast scalar static_assert(cute::is_same_v<StrideMNL, Stride<_0,_0, _0>>); scalar = params_ptr->scalars[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <int StagesC, class CtaTileShapeMNK, class EpilogueTile> constexpr int compute_row_broadcast_stages() { return ceil_div(StagesC, size<1>(zipped_divide(make_layout(take<0,2>(CtaTileShapeMNK{})), EpilogueTile{}))) + 1; } } // Row vector broadcast template< // Row bcast reuses the mbarriers from the epilogue subtile load pipeline, so this must be at least // ceil_div(StagesC, epi tiles per CTA tile) + 1 to ensure no data races int Stages, class CtaTileShapeMNK, class Element, class StrideMNL = Stride<_0,_1,_0>, int Alignment = 128 / sizeof_bits_v<Element>, bool EnableNullptr = true // Fallback scalar broadcast for nullptr params > struct Sm90RowBroadcast { static_assert(Alignment * sizeof_bits_v<Element> % 128 == 0, "sub-16B alignment not supported yet"); static_assert( (cute::is_same_v<StrideMNL, Stride<_0,_1, _0>>) || // row vector broadcast, e.g. 
per-col alpha/bias (cute::is_same_v<StrideMNL, Stride<_0,_1,int>>)); // batched row vector broadcast // Accumulator doesn't distribute row elements evenly amongst threads so we must buffer in smem struct SharedStorage { alignas(16) array_aligned<Element, size<1>(CtaTileShapeMNK{}) * Stages> smem_row; }; struct Arguments { Element const* ptr_row = nullptr; Element null_default = Element(0); StrideMNL dRow = {}; }; using Params = Arguments; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { return cutlass::Status::kSuccess; } CUTLASS_HOST_DEVICE Sm90RowBroadcast() { } CUTLASS_HOST_DEVICE Sm90RowBroadcast(Params const& params, SharedStorage const& shared_storage) : params(params), smem_row(const_cast<Element*>(shared_storage.smem_row.data())) { } Params params; Element* smem_row; CUTLASS_DEVICE bool is_producer_load_needed() const { return true; } CUTLASS_DEVICE bool is_C_load_needed() const { return false; } CUTLASS_DEVICE bool is_zero() const { return (params.ptr_row == nullptr && params.null_default == Element(0)); } template <int EpiTiles, class GTensor, class STensor> struct ProducerLoadCallbacks : EmptyProducerLoadCallbacks { CUTLASS_DEVICE ProducerLoadCallbacks(GTensor&& gRow, STensor&& sRow, Params const& params) : gRow(cute::forward<GTensor>(gRow)), sRow(cute::forward<STensor>(sRow)), params(params) {} GTensor gRow; // (CTA_M,CTA_N) STensor sRow; // (CTA_M,CTA_N,PIPE) Params const& params; CUTLASS_DEVICE void begin(uint64_t* full_mbarrier_ptr, int load_iteration, bool issue_tma_load) { if constexpr (EnableNullptr) { if (params.ptr_row == nullptr) { return; } } if (issue_tma_load) { // Increment the expect-tx count of the first subtile's mbarrier by the row vector's byte-size constexpr uint32_t copy_bytes = size<1>(CtaTileShapeMNK{}) * sizeof_bits_v<Element> / 8; cutlass::arch::ClusterTransactionBarrier::expect_transaction(full_mbarrier_ptr, copy_bytes); // Issue the TMA bulk copy auto bulk_copy = Copy_Atom<SM90_BULK_COPY_AUTO, Element>{}.with(*full_mbarrier_ptr); // Filter so we don't issue redundant copies over stride-0 modes int bcast_pipe_index = (load_iteration / EpiTiles) % Stages; copy(bulk_copy, filter(gRow), filter(sRow(_,_,bcast_pipe_index))); } } }; template <class... 
Args> CUTLASS_DEVICE auto get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) { auto [M, N, K, L] = args.problem_shape_mnkl; auto [m, n, k, l] = args.tile_coord_mnkl; Tensor mRow = make_tensor(make_gmem_ptr(params.ptr_row), make_shape(M,N,L), params.dRow); Tensor gRow = local_tile(mRow, take<0,2>(args.tile_shape_mnk), make_coord(m,n,l)); // (CTA_M,CTA_N) Tensor sRow = make_tensor(make_smem_ptr(smem_row), // (CTA_M,CTA_N,PIPE) make_shape(size<0>(CtaTileShapeMNK{}), size<1>(CtaTileShapeMNK{}), Stages), make_stride(_0{},_1{},size<1>(CtaTileShapeMNK{}))); constexpr int EpiTiles = decltype(size<1>(zipped_divide(make_layout(take<0,2>(args.tile_shape_mnk)), args.epi_tile)))::value; return ProducerLoadCallbacks<EpiTiles, decltype(gRow), decltype(sRow)>( cute::move(gRow), cute::move(sRow), params); } template <int EpiTiles, class RTensor, class STensor> struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks(RTensor&& tCrRow, STensor&& tCsRow, Params const& params) : tCrRow(cute::forward<RTensor>(tCrRow)), tCsRow(cute::forward<STensor>(tCsRow)), params(params) {} RTensor tCrRow; // (CPY,CPY_M,CPY_N) STensor tCsRow; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N,PIPE) Params const& params; CUTLASS_DEVICE void previsit(int epi_m, int epi_n, int load_iteration, bool is_producer_load_needed) { if constexpr (EnableNullptr) { if (params.ptr_row == nullptr) { fill(tCrRow, params.null_default); return; } } if (epi_m == 0) { // Assumes M-major subtile loop // Filter so we don't issue redundant copies over stride-0 modes // (only works if 0-strides are in same location, which is by construction) int bcast_pipe_index = (load_iteration / EpiTiles) % Stages; copy_aligned(filter(tCsRow(_,_,_,epi_m,epi_n,bcast_pipe_index)), filter(tCrRow)); } } template <typename ElementAccumulator, int FragmentSize> CUTLASS_DEVICE Array<Element, FragmentSize> visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) { Array<Element, FragmentSize> frg_row; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < FragmentSize; ++i) { frg_row[i] = tCrRow(epi_v * FragmentSize + i); } return frg_row; } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... 
Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { Tensor sRow = make_tensor(make_smem_ptr(smem_row), // (CTA_M,CTA_N,PIPE) make_shape(size<0>(CtaTileShapeMNK{}), size<1>(CtaTileShapeMNK{}), Stages), make_stride(_0{},_1{},size<1>(CtaTileShapeMNK{}))); Tensor tCsRow = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N,PIPE) sRow, args.epi_tile, args.tiled_copy, args.thread_idx); Tensor tCrRow = make_tensor_like(take<0,3>(tCsRow)); // (CPY,CPY_M,CPY_N) constexpr int EpiTiles = decltype(size<1>(zipped_divide(make_layout(take<0,2>(args.tile_shape_mnk)), args.epi_tile)))::value; return ConsumerStoreCallbacks<EpiTiles, decltype(tCrRow), decltype(tCsRow)>( cute::move(tCrRow), cute::move(tCsRow), params); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Column vector broadcast template< int Stages, class CtaTileShapeMNK, class Element, class StrideMNL = Stride<_1,_0,_0>, int Alignment = 128 / sizeof_bits_v<Element>, bool EnableNullptr = true // Fallback scalar broadcast for nullptr params > struct Sm90ColBroadcast { static_assert(Stages == 0, "Column broadcast doesn't support smem usage yet"); static_assert(Alignment * sizeof_bits_v<Element> % 128 == 0, "sub-16B alignment not supported yet"); static_assert( (cute::is_same_v<StrideMNL, Stride<_1,_0, _0>>) || // col vector broadcast, e.g. per-row alpha/bias (cute::is_same_v<StrideMNL, Stride<_1,_0,int>>)); // batched col vector broadcast, e.g. batched per-row bias // Accumulator distributes col elements evenly amongst threads so we can just directly load from gmem struct SharedStorage { }; struct Arguments { Element const* ptr_col = nullptr; Element null_default = Element(0); StrideMNL dCol = {}; }; using Params = Arguments; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { return cutlass::Status::kSuccess; } CUTLASS_DEVICE bool is_producer_load_needed() const { return false; } CUTLASS_DEVICE bool is_C_load_needed() const { return false; } CUTLASS_DEVICE bool is_zero() const { return (params.ptr_col == nullptr && params.null_default == Element(0)); } CUTLASS_HOST_DEVICE Sm90ColBroadcast() { } CUTLASS_HOST_DEVICE Sm90ColBroadcast(Params const& params, SharedStorage const& shared_storage) : params(params) { } Params params; template <class... 
Args> CUTLASS_DEVICE auto get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) { return EmptyProducerLoadCallbacks{}; } template<class GTensor, class RTensor> struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks { CUTLASS_DEVICE ConsumerStoreCallbacks(GTensor&& tCgCol, RTensor&& tCrCol, Params const& params) : tCgCol(cute::forward<GTensor>(tCgCol)), tCrCol(cute::forward<RTensor>(tCrCol)), params(params) {} GTensor tCgCol; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) RTensor tCrCol; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) Params const& params; CUTLASS_DEVICE void begin() { if constexpr (EnableNullptr) { if (params.ptr_col == nullptr) { fill(tCrCol, params.null_default); return; } } // Filter so we don't issue redundant copies over stride-0 modes // (only works if 0-strides are in same location, which is by construction) copy_aligned(filter(tCgCol), filter(tCrCol)); } template <typename ElementAccumulator, int FragmentSize> CUTLASS_DEVICE Array<Element, FragmentSize> visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) { Array<Element, FragmentSize> frg_col; Tensor tCrCol_mn = tCrCol(_,_,_,epi_m,epi_n); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < FragmentSize; ++i) { frg_col[i] = tCrCol_mn(epi_v * FragmentSize + i); } return frg_col; } }; template < bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy class... Args > CUTLASS_DEVICE auto get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) { auto [M, N, K, L] = args.problem_shape_mnkl; Tensor mCol = make_tensor(make_gmem_ptr(params.ptr_col), make_shape(M,N,L), params.dCol); Tensor tCgCol = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) mCol, args.tile_shape_mnk, args.tile_coord_mnkl, args.epi_tile, args.tiled_copy, args.thread_idx); Tensor tCrCol = make_tensor_like(tCgCol); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N) return ConsumerStoreCallbacks<decltype(tCgCol), decltype(tCrCol)>( cute::move(tCgCol), cute::move(tCrCol), params); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Batch matrix broadcast // Only need to redefine this if we can multicast across cluster L template < int Stages, class EpilogueTile, class Element, class StrideMNL, class SmemLayoutAtom, class CopyOpS2R, int Alignment = 128 / sizeof_bits_v<Element>, bool EnableNullptr = true // Fallback scalar broadcast for nullptr params > using Sm90MatrixBroadcast = Sm90AuxLoad<Stages, EpilogueTile, Element, StrideMNL, SmemLayoutAtom, CopyOpS2R, EnableNullptr>; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::epilogue::fusion /////////////////////////////////////////////////////////////////////////////////////////////////
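// A minimal host-side sketch of how the scalar broadcast node defined above is typically
// parameterized; it is illustrative only and assumes a float element type and two fused
// scale factors (e.g. fp8 scale_A * scale_B), as suggested by the BroadcastCount /
// ReductionFn template parameters. In a real kernel this node is instantiated by the
// collective epilogue builder as part of a fusion visitor tree rather than used directly.
namespace example_sketch {

using ScalarBroadcast = cutlass::epilogue::fusion::Sm90ScalarBroadcast<
    float,                                        // Element
    cute::Stride<cute::_0, cute::_0, cute::_0>,   // fully-broadcast (non-batched) scalar
    2,                                            // BroadcastCount: two scalars to combine
    cutlass::multiplies>;                         // ReductionFn: combine by multiplication

inline ScalarBroadcast::Arguments make_scalar_args(float scale_A, float scale_B) {
  ScalarBroadcast::Arguments args;
  // With scalar_ptrs left null, update_scalar() falls back to these by-value scalars
  // and reduces them as scale_A * scale_B before the epilogue visits any fragment.
  args.scalars[0] = scale_A;
  args.scalars[1] = scale_B;
  return args;
}

} // namespace example_sketch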
cutlass/include/cutlass/epilogue/fusion/sm90_visitor_load_tma_warpspecialized.hpp/0
{ "file_path": "cutlass/include/cutlass/epilogue/fusion/sm90_visitor_load_tma_warpspecialized.hpp", "repo_id": "cutlass", "token_count": 13640 }
22
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/scale_type.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationLeakyRelu { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = Array<ElementCompute, kCount>; using FragmentSource = Array<ElementOutput, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta_bias; ///< scales bias tensor ElementCompute leaky_alpha; ///< leaky_alpha // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta_bias(ElementCompute(0)), leaky_alpha(ElementCompute(1)) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta_bias, ElementCompute leaky_alpha = ElementCompute(1) ): alpha(alpha), beta_bias(beta_bias), leaky_alpha(leaky_alpha) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_bias_; ElementCompute leaky_alpha_recip_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationLeakyRelu(Params const &params) { alpha_ = (params.alpha); beta_bias_ = (params.beta_bias); leaky_alpha_recip_ = (ElementCompute(params.leaky_alpha)); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_bias_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition) { if (k_partition) { beta_bias_ = ElementCompute(1); } } CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_bias_ = ElementCompute(1); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_source = source_converter(source); ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_add_source; multiply_add<ComputeFragment> mul_add_accumulator; LeakyReLU<ComputeFragment> leakyrelu; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = 
converted_accumulator; } else { intermediate = mul_add_source(beta_bias_, converted_source); // X = beta_bias * C intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Compute threshold optionally intermediate = leakyrelu(intermediate, leaky_alpha_recip_); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert accumulator to internal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_accumulator; LeakyReLU<ComputeFragment> leakyrelu; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Compute threshold optionally intermediate = leakyrelu(intermediate, leaky_alpha_recip_); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
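// A brief, hedged usage sketch (not part of the original header): applying the functor
// above directly to small fragments on the host. The float element type, fragment width
// of 4, and parameter values are assumptions chosen for illustration; inside a GEMM this
// functor is invoked per output fragment by the threadblock epilogue.
namespace example_sketch {

using LeakyReluEpilogueOp =
    cutlass::epilogue::thread::LinearCombinationLeakyRelu<float, /*Count=*/4>;

inline cutlass::Array<float, 4> apply_leaky_relu_epilogue(
    cutlass::Array<float, 4> const& accum,
    cutlass::Array<float, 4> const& source) {
  // D = LeakyReLU(alpha * accum + beta_bias * source), with a negative slope of 0.1
  LeakyReluEpilogueOp::Params params(
      /*alpha=*/1.0f, /*beta_bias=*/1.0f, /*leaky_alpha=*/0.1f);
  LeakyReluEpilogueOp op(params);
  return op(accum, source);
}

} // namespace example_sketch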
cutlass/include/cutlass/epilogue/thread/linear_combination_leaky_relu.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_leaky_relu.h", "repo_id": "cutlass", "token_count": 2682 }
23
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using SIMT. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/arch/mma.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/warp/fragment_iterator_simt.h" #include "cutlass/epilogue/warp/tile_iterator_simt.h" #include "cutlass/epilogue/threadblock/default_thread_map_simt.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/threadblock/shared_load_iterator_pitch_linear.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/threadblock/epilogue_depthwise.h" #include "cutlass/layout/permute.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template < typename Shape_, typename WarpMmaSimt_, typename OutputOp_, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute, conv::StrideSupport StrideSupport = conv::StrideSupport::kUnity, int Rank = 4 > struct DefaultEpilogueSimt { using Shape = Shape_; using WarpMmaSimt = WarpMmaSimt_; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaSimt::LayoutC; using ElementAccumulator = typename WarpMmaSimt::ElementC; static conv::StrideSupport const kStrideSupport = StrideSupport; static int const kRank = Rank; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt< Shape, typename WarpMmaSimt::Shape, typename WarpMmaSimt::Policy, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; static bool const UseCUDAStore = platform::is_same<ElementOutput, double>::value; using PackedOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout, UseCUDAStore >; using StridedOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorConv< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout, UseCUDAStore, kRank >; using OutputTileIterator = typename platform::conditional<StrideSupport == cutlass::conv::StrideSupport::kUnity, PackedOutputTileIterator, StridedOutputTileIterator>::type; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, layout::RowMajor, typename WarpMmaSimt::Policy >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, ElementAccumulator, layout::RowMajor, typename WarpMmaSimt::Policy >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaSimt, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template < typename Shape_, typename WarpMmaSimt_, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueSimtStridedDgrad { using Shape = Shape_; using WarpMmaSimt = WarpMmaSimt_; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaSimt::LayoutC; using ElementAccumulator = typename WarpMmaSimt::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt< Shape, typename WarpMmaSimt::Shape, typename WarpMmaSimt::Policy, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad< OutputTileThreadMap, ElementOutput >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, layout::RowMajor, typename WarpMmaSimt::Policy >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, ElementAccumulator, layout::RowMajor, typename WarpMmaSimt::Policy >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaSimt, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template < int Rank, typename Shape_, typename WarpMmaSimt_, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueSimtAffineRankN { using Shape = Shape_; using WarpMmaSimt = WarpMmaSimt_; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaSimt::LayoutC; using ElementAccumulator = typename WarpMmaSimt::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt< Shape, typename WarpMmaSimt::Shape, typename WarpMmaSimt::Policy, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN< OutputTileThreadMap, ElementOutput, Rank >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, layout::RowMajor, typename WarpMmaSimt::Policy >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, ElementAccumulator, layout::RowMajor, typename WarpMmaSimt::Policy >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaSimt, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template <typename Shape_, // ThreadBlock Shape typename WarpMmaSimt_, // mma_depthwise_simt typename OutputOp_, int ElementsPerAccess_, typename ThreadOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>, typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > struct DefaultDirectConvEpilogueSimt { using Shape = Shape_; using WarpMmaSimt = WarpMmaSimt_; using WarpShape = typename WarpMmaSimt::Shape; using OutputOp = OutputOp_; using ThreadOutputShape = ThreadOutputShape_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; static int const kElementsPerAccess = ElementsPerAccess_; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaSimt::LayoutC; using ElementAccumulator = typename WarpMmaSimt::ElementC; /// Number of threads total using WarpCount = gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN >; static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value; static int const kThreads = WarpCount::kCount * kWarpSize; // // Thread map // using OutputTileThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<ThreadBlockOutputShape::kC, ThreadBlockOutputShape::kNHW>, kThreads, kElementsPerAccess >; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorDirectConv< OutputTileThreadMap, ElementOutput, ThreadOutputShape, ThreadBlockOutputShape >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, layout::RowMajor, typename WarpMmaSimt::Policy >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimtDirect2dConv< typename WarpMmaSimt::Shape, ThreadOutputShape, ThreadBlockOutputShape, typename WarpMmaSimt::ThreadMma, ElementAccumulator, layout::RowMajor, typename WarpMmaSimt::Policy >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorPitchLinear< OutputTileThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::EpilogueDepthwise< Shape, ThreadOutputShape, ThreadBlockOutputShape, WarpMmaSimt, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
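// A hedged sketch (not in the original header) of how a kernel-level default typically
// consumes the DefaultEpilogueSimt trait above. "WarpMmaSimt" is left as a template
// parameter standing in for an already-formed cutlass::gemm::warp::MmaSimt instantiation
// supplied by the GEMM defaults; the threadblock shape, element type, and single element
// per access below are illustrative assumptions rather than values prescribed by this file.
namespace example_sketch {

template <typename WarpMmaSimt>
struct SimtEpilogueSelection {
  // Threadblock tile and epilogue output operator chosen for illustration.
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using OutputOp = cutlass::epilogue::thread::LinearCombination<
      float, /*Count=*/1, float, float>;

  // The selected threadblock-scoped epilogue; SIMT epilogues commonly use a single
  // element per access since output stores are not vectorized across threads.
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
      ThreadblockShape, WarpMmaSimt, OutputOp, /*ElementsPerAccess=*/1>::Epilogue;
};

} // namespace example_sketch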
cutlass/include/cutlass/epilogue/threadblock/default_epilogue_simt.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/default_epilogue_simt.h", "repo_id": "cutlass", "token_count": 4691 }
24
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for Depthwise convolution The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/gemm/gemm.h" #include "cutlass/numeric_types.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template <typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename ThreadOutputShape_, /// Size of the matrix to load (concept: TensorNHWC) typename ThreadBlockOutputShape_, /// Size of the matrix to load (concept: TensorNHWC) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: ///< gemm::warp::MmaTensorOp) typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator typename Padding_ ///< Padding added to SMEM allocation to avoid bank conflicts (concept: ///< MatrixShape) > class EpilogueDepthwise { public: using Shape = Shape_; using WarpShape = typename WarpMmaOperator_::Shape; using ThreadOutputShape = ThreadOutputShape_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; using WarpMmaOperator = WarpMmaOperator_; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array<typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = gemm::GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN>; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); /// Shared storage 
allocation needed by the epilogue struct SharedStorage { // // Type definitions // /// Element type of shared memory using Element = typename WarpTileIterator::Element; /// Tensor reference to shared memory allocation using TensorRef = typename WarpTileIterator::TensorRef; /// Layout of shared memory allocation using Layout = typename WarpTileIterator::Layout; /// Logical shape of the shared memory tile written to by all warps. using Shape = MatrixShape<ThreadBlockOutputShape::kNHW, ThreadBlockOutputShape::kC>; /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape<Shape::kRow, Shape::kColumn>; // // Data members // AlignedBuffer<Element, StorageShape::kCount> storage; // // Methods // /// Returns a pointer to the shared memory buffer CUTLASS_DEVICE Element *data() { return storage.data(); } /// Returns a tensor reference to the shared memory buffer CUTLASS_DEVICE TensorRef reference() { return TensorRef(storage.data(), Layout::packed({StorageShape::kRow, StorageShape::kColumn})); } }; private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Stores a warp's fragment of accumulators to SMEM WarpTileIterator warp_tile_iterator_; LongIndex warp_offset; int thread_idx; int warp_idx; int lane_idx; int warp_m, warp_n; // warp coordinates within a cta int tid_m, tid_n; // thread coordinates within a warp public: /// Constructor CUTLASS_DEVICE EpilogueDepthwise(SharedStorage &shared_storage, ///< Shared storage object int thread_idx_, ///< ID of a thread within the threadblock int warp_idx_, ///< ID of warp within threadblock int lane_idx_ ///< Id of thread within warp ) : thread_idx(thread_idx_), warp_idx(warp_idx_), lane_idx(lane_idx_), shared_load_iterator_(shared_storage.reference(), thread_idx_), warp_tile_iterator_(shared_storage.reference(), thread_idx_, lane_idx_) {} /// Streams the result to global memory CUTLASS_DEVICE void operator()(OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator, ///< Threadblock tile coordinate in GEMM (in ///< units of threadblock tiles) const int smem_base_offset) { ///< SMEM base offset for epilogue operation // initiate the smem base offset for different output tile. 
warp_tile_iterator_.set_smem_base_address(smem_base_offset); shared_load_iterator_.set_smem_base_address(smem_base_offset); if (!output_op.is_source_needed()) { compute_source_not_needed_(output_op, destination_iterator, accumulators); } else { compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator); } } private: /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); source_iterator.load(source_fragment); // store to smem warp_tile_iterator_.store(accumulators); __syncthreads(); typename SharedLoadIterator::Fragment aligned_accum_fragment; // load from smem shared_load_iterator_.load(aligned_accum_fragment); typename OutputTileIterator::Fragment output_fragment; apply_output_operator_(output_fragment, output_op, aligned_accum_fragment, source_fragment); // Store to GMEM destination_iterator.store(output_fragment); } /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) // store to smem warp_tile_iterator_.store(accumulators); __syncthreads(); typename SharedLoadIterator::Fragment aligned_accum_fragment; // load from smem shared_load_iterator_.load(aligned_accum_fragment); typename OutputTileIterator::Fragment output_fragment; apply_output_operator_source_not_needed_(output_fragment, output_op, aligned_accum_fragment); // Store to GMEM destination_iterator.store(output_fragment); } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment, typename OutputTileIterator::Fragment const &source_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); OutputAccessType const *source_frag_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]); } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_source_not_needed_( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); int const kOutputOpIterations = 
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i]); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
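// A hedged sketch (not part of the original file) of the call pattern a depthwise
// convolution kernel uses to drive this epilogue. The concrete Epilogue type and its
// iterators are assumed to come from a DefaultDirectConvEpilogueSimt-style trait;
// the function and parameter names here are placeholders chosen for illustration.
namespace example_sketch {

template <typename Epilogue>
CUTLASS_DEVICE void run_depthwise_epilogue(
    typename Epilogue::SharedStorage& shared_storage,
    typename Epilogue::OutputOp const& output_op,
    typename Epilogue::OutputTileIterator destination_iterator,
    typename Epilogue::AccumulatorTile const& accumulators,
    typename Epilogue::OutputTileIterator source_iterator,
    int thread_idx, int warp_idx, int lane_idx,
    int smem_base_offset) {
  // Construct the epilogue over this threadblock's shared-memory staging buffer.
  Epilogue epilogue(shared_storage, thread_idx, warp_idx, lane_idx);
  // Stages accumulators through SMEM, applies the output operator (reading the source
  // tile only when output_op.is_source_needed()), and writes the output tile to GMEM.
  // smem_base_offset selects the staging region used for the current output tile.
  epilogue(output_op, destination_iterator, accumulators, source_iterator, smem_base_offset);
}

} // namespace example_sketch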
cutlass/include/cutlass/epilogue/threadblock/epilogue_depthwise.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_depthwise.h", "repo_id": "cutlass", "token_count": 4441 }
25
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Visitor tree store operations for the CUTLASS 2x epilogue */ #pragma once #include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::threadblock { using namespace cute; using namespace detail; using X = Underscore; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // // Elementwise Store Operations // ///////////////////////////////////////////////////////////////////////////////////////////////// template< class ThreadMap, class Element, FloatRoundStyle RoundStyle, class StrideMNL > struct VisitorAuxStore{ struct Arguments { Element* ptr_aux = nullptr; StrideMNL dAux = {}; }; using Params = Arguments; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } struct SharedStorage {}; static int constexpr vec_bits = ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value; using VecType = uint_bit_t<cute::min(128, vec_bits)>; static int constexpr VecLength = sizeof(VecType) / sizeof(Element); CUTLASS_HOST_DEVICE VisitorAuxStore() { } CUTLASS_HOST_DEVICE VisitorAuxStore(Params const& params, SharedStorage const& shared_storage) : params_ptr(&params) { } Params const* params_ptr; template <class GTensor, class RTensor, class CTensor, class ProblemShape> struct Callbacks : EmptyCallbacks { CUTLASS_DEVICE Callbacks( GTensor&& tC_gAux, RTensor&& tC_rAux, CTensor&& tC_cAux, ProblemShape problem_shape, Params const* params_ptr ): tC_gAux(cute::forward<GTensor>(tC_gAux)), tC_rAux(cute::forward<RTensor>(tC_rAux)), tC_cAux(cute::forward<CTensor>(tC_cAux)), problem_shape(problem_shape), params_ptr(params_ptr) { } GTensor tC_gAux; RTensor tC_rAux; CTensor tC_cAux; Params const* params_ptr; ProblemShape problem_shape; CUTLASS_DEVICE void begin_step(int step_idx) { clear(tC_rAux); } template <class ElementAccumulator, class ElementInput, int FragmentSize> CUTLASS_DEVICE auto // returns an Array visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc, Array<ElementInput, FragmentSize> const& frg_input) { using ConvertInput = NumericArrayConverter<Element, ElementInput, FragmentSize, RoundStyle>; ConvertInput convert_input{}; Tensor tC_rAux_frg = recast<Array<Element, FragmentSize>>(coalesce(tC_rAux)); tC_rAux_frg(frg_idx) = convert_input(frg_input); return frg_input; } CUTLASS_DEVICE void end_step(int step_idx) { auto src_v = filter(tC_rAux); auto coord_v = filter(tC_cAux(_,_,_,step_idx)); auto dst_v = filter(tC_gAux(_,_,_,step_idx)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(src_v); ++i) { bool guard = elem_less(coord_v(i), problem_shape); cutlass::arch::global_store<VecType, sizeof(VecType)>(src_v(i), (void*)&dst_v(i), guard); } } }; template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { Tensor mAux = make_tensor( make_gmem_ptr(params_ptr->ptr_aux), problem_shape, params_ptr->dAux); // (M,N,L) // VECTOR, FRAGMENT_COLUMN, FRAGMENT_ROW, ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER Tensor tC_gAux = 
recast<VecType>(group_modes<3,6>(ThreadMap::partition(mAux, thread_idx, threadblock_tile_offset))); Tensor tC_rAux = make_tensor_like(take<0,3>(tC_gAux)); // Generate the pred tensor Tensor cAux = make_identity_tensor(mAux.shape()); Tensor tC_cAux = outer_partition( group_modes<3,6>(ThreadMap::partition(cAux, thread_idx, threadblock_tile_offset)), Shape<Int<VecLength>>{}, (_0{}) ); return Callbacks< decltype(tC_gAux), decltype(tC_rAux), decltype(tC_cAux), ProblemShape>( cute::move(tC_gAux), cute::move(tC_rAux), cute::move(tC_cAux), problem_shape, params_ptr ); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Reduction Store Operations // ///////////////////////////////////////////////////////////////////////////////////////////////// // Helper functions template < template <class> class ReduceFn, int kThreads, class T> CUTLASS_DEVICE void intra_warp_row_reduce(T& value) { using ReduceInput = ReduceFn<T>; ReduceInput reduce_input{}; constexpr int kHalfThreads = kThreads >> 1; CUTLASS_PRAGMA_UNROLL for (int i = kHalfThreads; i > 0; i >>= 1) { value = reduce_input(value, __shfl_xor_sync(0xFFFFFFFF, value, i)); } } template < template <class> class ReduceFn, FloatRoundStyle RoundStyle, class ElementCompute, class ElementFragment, int FragmentSize> CUTLASS_DEVICE void fragment_reduce(ElementCompute& value, Array<ElementFragment, FragmentSize> const& frg) { using ReduceInput = ReduceFn<ElementCompute>; ReduceInput reduce_input{}; using ConvertInput = NumericConverter<ElementCompute, ElementFragment, RoundStyle>; ConvertInput convert_input{}; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < FragmentSize; ++i) { value = reduce_input(value, convert_input(frg[i])); } } template< template <class> class AtomicReduceFn, FloatRoundStyle RoundStyle, class ElementCompute, class ElementOutput> CUTLASS_DEVICE void atomic_reduce(ElementOutput* ptr, ElementCompute const& value) { using ReduceOutput = AtomicReduceFn<ElementOutput>; using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>; ReduceOutput reduce_output{}; ConvertOutput convert_output{}; reduce_output(ptr, convert_output(value)); } // Col vector reduction template < template <class> class RegReduceFn, template <class> class AtomicReduceFn, class ThreadMap, class ElementOutput, class ElementCompute, FloatRoundStyle RoundStyle, class StrideMNL = Stride<_1,_0,_0> > struct VisitorColReduction { struct Arguments { ElementOutput* ptr_col = nullptr; ElementCompute reduction_identity = 0; StrideMNL dCol = {}; }; using Params = Arguments; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } struct SharedStorage { }; CUTLASS_HOST_DEVICE VisitorColReduction() { } CUTLASS_HOST_DEVICE VisitorColReduction(Params const& params, SharedStorage const& shared_storage) : params_ptr(&params) { } Params const* params_ptr; template <class GTensor, class CTensor, class ProblemShape> struct Callbacks : EmptyCallbacks { CUTLASS_DEVICE Callbacks( GTensor&& tC_gCol, CTensor&& tC_cCol, ProblemShape problem_shape, Params const* params_ptr, int thread_idx ): tC_gCol(cute::forward<GTensor>(tC_gCol)), tC_cCol(cute::forward<CTensor>(tC_cCol)), m(get<0>(problem_shape)), n(get<1>(problem_shape)), params_ptr(params_ptr) { // The partial reduction results of 
each warp are further // reduced to the first thread in each row. // Only the first thread in each row is the writing thread is_writing_thread = thread_idx % ThreadMap::Detail::kAccessWidth == 0; } GTensor tC_gCol; CTensor tC_cCol; Params const* params_ptr; int m; int n; int curr_iter_idx; bool is_writing_thread; ElementCompute reduction_accum; CUTLASS_DEVICE void begin_row(int row_idx) { reduction_accum = ElementCompute(params_ptr->reduction_identity); } template <class ElementAccumulator, class ElementInput, int FragmentSize> CUTLASS_DEVICE auto // returns an Array visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc, Array<ElementInput, FragmentSize> const& frg_input) { curr_iter_idx = iter_idx; int coord_n = get<1>(tC_cCol(column_idx, row_idx, iter_idx)); if (coord_n < n) { fragment_reduce<RegReduceFn, RoundStyle>(reduction_accum, frg_input); } // Intra-warp reduction if (column_idx + 1 == ThreadMap::Iterations::kColumn) { intra_warp_row_reduce<RegReduceFn, ThreadMap::Detail::kAccessWidth>(reduction_accum); } return frg_input; } CUTLASS_DEVICE auto end_row(int row_idx) { bool guard = get<0>(tC_cCol(_0{}, row_idx,curr_iter_idx)) < m; if (guard && is_writing_thread) { atomic_reduce<AtomicReduceFn, RoundStyle>(&tC_gCol(row_idx,curr_iter_idx), reduction_accum); } } }; template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { Tensor mCol = make_tensor( make_gmem_ptr(params_ptr->ptr_col), problem_shape, params_ptr->dCol); // FRAGMENT_ROW, (ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER) Tensor tC_gCol = group_modes<1,4>( ThreadMap::partition(mCol, thread_idx, threadblock_tile_offset)(_0{},_0{},_,_,_,_)); // Generate the pred tensor Tensor cCol = make_identity_tensor(mCol.shape()); // FRAGMENT_COL, FRAGMENT_ROW, (ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER) Tensor tC_cCol = group_modes<2,5>( ThreadMap::partition(cCol, thread_idx, threadblock_tile_offset)(_0{},_,_,_,_,_)); return Callbacks< decltype(tC_gCol), decltype(tC_cCol), ProblemShape>( cute::move(tC_gCol), cute::move(tC_cCol), problem_shape, params_ptr, thread_idx ); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Row vector reduction template < template <class> class RegReduceFn, template <class> class AtomicReduceFn, class ThreadMap, class ElementOutput, class ElementCompute, FloatRoundStyle RoundStyle, class StrideMNL = Stride<_0,_1,_0> > struct VisitorRowReduction { struct Arguments { ElementOutput* ptr_row = nullptr; ElementCompute reduction_identity = 0; StrideMNL dRow = {}; }; using Params = Arguments; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } using SharedStorageShape = decltype(select<0,1,2,3,5,8,10>(typename ThreadMap::ThreadMapShape{})); struct SharedStorage { AlignedArray<ElementCompute, size(SharedStorageShape{}), 16> reduction; }; static int constexpr vec_bits = ThreadMap::kElementsPerAccess * sizeof_bits<ElementOutput>::value; using VecType = uint_bit_t<cute::min(128, vec_bits)>; CUTLASS_HOST_DEVICE VisitorRowReduction() { } CUTLASS_HOST_DEVICE VisitorRowReduction(Params const& params, SharedStorage const& shared_storage) : 
params_ptr(&params), smem_reduce(const_cast<ElementCompute*>(shared_storage.reduction.data())) { } Params const* params_ptr; ElementCompute* smem_reduce; template < class RTensorR2S, class STensorR2S, class CTensorR2S, class STensorS2R, class RTensorS2R, class CTensorS2R, class GTensor, class CTensor, class ProblemShape> struct Callbacks : EmptyCallbacks { CUTLASS_DEVICE Callbacks( // R->S RTensorR2S&& tRS_rSrc, STensorR2S&& tRS_sRows, CTensorR2S&& tRS_cSrc, // S->R STensorS2R&& tSR_sRows, RTensorS2R&& tSR_rRows, CTensorS2R&& tSR_cRows, // R->G GTensor&& tC_gRow, CTensor&& tC_cRow, ProblemShape problem_shape, Params const* params_ptr ): // R->S tRS_rSrc(cute::forward<RTensorR2S>(tRS_rSrc)), tRS_sRows(cute::forward<STensorR2S>(tRS_sRows)), tRS_cSrc(cute::forward<CTensorR2S>(tRS_cSrc)), // S->R tSR_sRows(cute::forward<STensorS2R>(tSR_sRows)), tSR_rRows(cute::forward<RTensorS2R>(tSR_rRows)), tSR_cRows(cute::forward<CTensorS2R>(tSR_cRows)), // R->G tC_gRow(cute::forward<GTensor>(tC_gRow)), tC_cRow(cute::forward<CTensor>(tC_cRow)), m(get<0>(problem_shape)), n(get<1>(problem_shape)), params_ptr(params_ptr) { } // R->S RTensorR2S tRS_rSrc; STensorR2S tRS_sRows; CTensorR2S tRS_cSrc; // S->R STensorS2R tSR_sRows; RTensorS2R tSR_rRows; CTensorS2R tSR_cRows; // R->G GTensor tC_gRow; CTensor tC_cRow; Params const* params_ptr; int n; int m; CUTLASS_DEVICE void begin_epilogue() { fill(tRS_rSrc, params_ptr->reduction_identity); } template <class ElementAccumulator, class ElementInput, int FragmentSize> CUTLASS_DEVICE auto // returns an Array visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc, Array<ElementInput, FragmentSize> const& frg_input) { using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>; ConvertInput convert_input{}; Tensor tRS_rRow_frg = recast<Array<ElementCompute, FragmentSize>>(coalesce(tRS_rSrc)); int coord_m = get<0>(tRS_cSrc(column_idx,row_idx,iter_idx)); if (coord_m < m) reduction(tRS_rRow_frg[column_idx], convert_input(frg_input)); return frg_input; } CUTLASS_DEVICE void end_epilogue() { // // Store the partially reduced value to SMEM // // Guard against uses of the existing SMEM tile __syncthreads(); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(tRS_rSrc); ++i) { copy_vec<VecType>(filter(tRS_rSrc), filter(tRS_sRows)); } __syncthreads(); // // Now, threads are assigned several columns of the output. They fetch over all rows from // the compacted SMEM tile and perform a reduction. 
// fill(tSR_rRows, params_ptr->reduction_identity); using ReduceInputReg = RegReduceFn<ElementCompute>; ReduceInputReg reduce_input_reg{}; CUTLASS_PRAGMA_UNROLL for (int j = 0; j < size(tSR_rRows); ++j) { if (get<0>(tSR_cRows(j)) < get<1>(typename ThreadMap::CtaShapeMNL{}) && get<1>(tC_cRow(j)) < n) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(tSR_sRows) / size(tSR_rRows); ++i) { tSR_rRows(j) = reduce_input_reg(tSR_rRows(j), tSR_sRows(i + j * size(tSR_sRows) / size(tSR_rRows))); } atomic_reduce<AtomicReduceFn, RoundStyle>(&tC_gRow(j), tSR_rRows(j)); } } } private: template <int FragmentSize> CUTLASS_DEVICE ElementCompute reduction(Array<ElementCompute, FragmentSize>& reduce_buffer, Array<ElementCompute, FragmentSize> const& result) { using ReduceInput = RegReduceFn<ElementCompute>; ReduceInput reduce_input{}; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < FragmentSize; ++i) { reduce_buffer[i] = reduce_input(reduce_buffer[i], result[i]); } } }; template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { Tensor mRow = make_tensor( make_gmem_ptr(params_ptr->ptr_row), problem_shape, params_ptr->dRow); // // Step 1: reduce fragment input (Src) into tRS_rSrc // // VECTOR,FRAGMENT_COL Tensor tRS_rSrc = make_tensor<ElementCompute>(select<0,2>(typename ThreadMap::ThreadMapShape{})); Tensor cSrc = make_identity_tensor(mRow.shape()); // FRAGMENT_COLUMN, FRAGMENT_ROW, (ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER) Tensor tRS_cSrc = group_modes<2,5>(ThreadMap::partition(cSrc, thread_idx, threadblock_tile_offset)(_0{},_,_,_,_,_)); // // Step 2: copy the partial results in tRS_rSrc to sRows in shared memory // // VECTOR,ACCESS_WIDTH,FRAGMENT_COL,ACCESS_ROWS,WARPS_PER_ROW,GROUPS,CLUSTERS Tensor sRows = make_tensor( make_smem_ptr(smem_reduce), SharedStorageShape{} ); auto [lane_col_coord, lane_row_coord, warp_row_coord, group_coord, cluster_coord] = ThreadMap::tid2coord(thread_idx); Tensor tRS_sRows = sRows(_,lane_col_coord,_,lane_row_coord,warp_row_coord,group_coord,cluster_coord); // // Step 3: copy the partial results in sRows to tSR_sRow for reduction // // VECTOR*ACCESS_WIDTH*FRAGMENT_COL,ACCESS_ROWS*WARPS_PER_ROW*GROUPS*CLUSTERS Tensor sRows_nm = coalesce(group_modes<1,5>(group_modes<0,3>(sRows)), Shape<_1,_1>{}); // SMEM_ROW/THREADS,ACCESS_ROWS*WARPS_PER_ROW*GROUPS*CLUSTERS Tensor tSR_sRows = outer_partition(sRows_nm, Shape<Int<ThreadMap::kThreads>,_1>{}, thread_idx); // SMEM_ROW/THREADS Tensor tSR_rRows = make_tensor_like(tSR_sRows(_,_0{})); // Coord Tensor cRows_nm = make_identity_tensor(sRows_nm.shape()); Tensor tSR_cRows = outer_partition(cRows_nm, Shape<Int<ThreadMap::kThreads>,_1>{}, thread_idx)(_,_0{}); // // Step 4: atomically reduce the results to global memory // Tensor tC_gRow = outer_partition( // Cta tile local_tile( mRow, typename ThreadMap::CtaShapeMNL{}, make_coord(_,_,_),Step<_1,_1, X>{} )(_,_,threadblock_tile_offset.m(),threadblock_tile_offset.n(),threadblock_tile_offset.k()), // Partition to threads Shape<_1,Int<ThreadMap::kThreads>>{}, thread_idx )(_0{},_); Tensor cRow = make_identity_tensor(mRow.shape()); Tensor tC_cRow = outer_partition( // Cta tile local_tile( cRow, typename ThreadMap::CtaShapeMNL{}, make_coord(_,_,_), Step<_1,_1, X>{} )(_,_,threadblock_tile_offset.m(),threadblock_tile_offset.n(),threadblock_tile_offset.k()), // Partition to threads Shape<_1,Int<ThreadMap::kThreads>>{}, thread_idx )(_0{},_); return Callbacks< decltype(tRS_rSrc), decltype(tRS_sRows), 
decltype(tRS_cSrc), decltype(tSR_sRows), decltype(tSR_rRows), decltype(tSR_cRows), decltype(tC_gRow), decltype(tC_cRow), ProblemShape>( // R->S cute::move(tRS_rSrc), cute::move(tRS_sRows), cute::move(tRS_cSrc), // S->R cute::move(tSR_sRows), cute::move(tSR_rRows), cute::move(tSR_cRows), // R->G cute::move(tC_gRow), cute::move(tC_cRow), problem_shape, params_ptr ); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Scalar reduction template < template <class> class RegReduceFn, template <class> class AtomicReduceFn, class ThreadMap, class ElementOutput, class ElementCompute, FloatRoundStyle RoundStyle, class StrideMNL = Stride<_0,_0,_0> > struct VisitorScalarReduction { static_assert( (cute::is_same_v<StrideMNL, Stride<_0,_0, _0>>) || // scalar reduction, e.g. tensor max element (cute::is_same_v<StrideMNL, Stride<_0,_0, _1>>) || // batched scalar reduction, e.g. per-batch max element (cute::is_same_v<StrideMNL, Stride<_0,_0,int>>)); struct Arguments { ElementOutput* ptr_scalar = nullptr; ElementCompute reduction_identity = 0; StrideMNL dScalar = {}; }; using Params = Arguments; template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } struct SharedStorage { }; CUTLASS_HOST_DEVICE VisitorScalarReduction(){ }; CUTLASS_HOST_DEVICE VisitorScalarReduction(Params const& params, SharedStorage const& shared_storage) : params_ptr(&params) { } Params const* params_ptr; template <class CTensor, class GTensor, class ProblemShape> struct Callbacks : EmptyCallbacks { CUTLASS_DEVICE Callbacks( CTensor&& tC_cSrc, GTensor&& tC_gScalar, ProblemShape problem_shape, Params const* params_ptr, int thread_idx ): tC_cSrc(cute::forward<CTensor>(tC_cSrc)), tC_gScalar(cute::forward<GTensor>(tC_gScalar)), problem_shape(problem_shape), params_ptr(params_ptr) { // The partial reduction results of each warp are further // reduced to this first thread. 
// Only the first thread of each warp is the writing thread is_writing_thread = thread_idx % ThreadMap::kWarpSize == 0; } GTensor tC_gScalar; CTensor tC_cSrc; Params const* params_ptr; ProblemShape problem_shape; bool is_writing_thread; ElementCompute reduction_accum; CUTLASS_DEVICE void begin_epilogue() { reduction_accum = ElementCompute(params_ptr->reduction_identity); } template <class ElementAccumulator, class ElementInput, int FragmentSize> CUTLASS_DEVICE auto visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc, Array<ElementInput, FragmentSize> const& frg_input) { auto coord = tC_cSrc(column_idx, row_idx, iter_idx); if (elem_less(coord, problem_shape)) { fragment_reduce<RegReduceFn, RoundStyle>(reduction_accum, frg_input); } return frg_input; } CUTLASS_DEVICE auto end_epilogue() { // Intra-warp reduction intra_warp_row_reduce<RegReduceFn, ThreadMap::kWarpSize>(reduction_accum); // Atomically reduce to global memory atomic_reduce<AtomicReduceFn, RoundStyle>(&tC_gScalar(_0{},_0{}), reduction_accum); } }; template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { Tensor cSrc = make_identity_tensor(problem_shape); // FRAGMENT_COL, FRAGMENT_ROW, (ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER) Tensor tC_cSrc = group_modes<2,5>( ThreadMap::partition(cSrc, thread_idx, threadblock_tile_offset)(_0{},_,_,_,_,_) ); Tensor mScalar = make_tensor( make_gmem_ptr(params_ptr->ptr_scalar), problem_shape, params_ptr->dScalar ); Tensor tC_gScalar = mScalar(_,_,threadblock_tile_offset.k()); return Callbacks< decltype(tC_cSrc), decltype(tC_gScalar), ProblemShape>( cute::move(tC_cSrc), cute::move(tC_gScalar), problem_shape, params_ptr, thread_idx ); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::epilogue::threadblock /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_store.hpp/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_store.hpp", "repo_id": "cutlass", "token_count": 10363 }
26
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. This assumes the shared memory tile is in a permuted layout which avoids bank conflicts on loading. When the fragment is loaded into registers, it matches the row-major thread map assumed by the predicated tile iterator writing to global memory. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from shared memory in epilogue. 
/// /// Satisfies: ReadableTileIterator /// template <typename ThreadMap_, ///< Thread map (concept: PitchLinearThreadMap) typename Element_, ///< Element data type int MaxAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8> class SharedLoadIteratorPitchLinear { public: using ThreadMap = ThreadMap_; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kMinAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8; static int const kAlignment = (MaxAlignment < kMinAlignment ? MaxAlignment : kMinAlignment); static int const kThreads = ThreadMap::kThreads; /// Fragment object using Fragment = Array<Element, ThreadMap::Iterations::kCount * kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, kElementsPerAccess, kAlignment>; /// Vector type used for SMEM loads using LoadType = AlignedArray<Element, const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess), const_min(16, kAlignment)>; static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements; private: // // Data members // /// Byte-level pointer uint8_t *byte_pointer_; /// Stride along adjacent rows int stride_; /// Base address offset Index base_smem_address_; public: // // Methods // /// Constructor CUTLASS_DEVICE SharedLoadIteratorPitchLinear(TensorRef ref, int thread_idx) : byte_pointer_(reinterpret_cast<uint8_t *>(ref.data())), stride_((ref.stride(0) * sizeof_bits<Element>::value) / 8), base_smem_address_(0) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); // Initialize pointer // thread_offset.row() is contiguous dim // thread_offset.column() is stride dim byte_pointer_ += thread_offset.row() * sizeof(AccessType) / kElementsPerAccess + thread_offset.column() * stride_; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_DEVICE void add_tile_offset(TensorCoord const &offset) { byte_pointer_ += offset.row() * ThreadMap::StorageShape::kContiguous * sizeof(AccessType) / kElementsPerAccess + offset.column() * ThreadMap::StorageShape::kStrided * stride_; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { uint8_t const *byte_pointer = byte_pointer_ + s * ThreadMap::Delta::kStrided * stride_ + c * ThreadMap::Delta::kContiguous * ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value / 8 + pointer_offset * sizeof_bits<Element>::value / 8 + base_smem_address_; int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c; LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag); LoadType const *memory_pointer = reinterpret_cast<LoadType const *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kLoadsPerAccess; ++v) { frag_ptr[frag_base_idx * kLoadsPerAccess + v] = memory_pointer[v]; } } } } /// Sets the base SMEM address used as an additional offset for subsequent loads CUTLASS_DEVICE void set_smem_base_address(Index address) { base_smem_address_ = address; } ///
Loads a fragment CUTLASS_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
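// Worked pointer-arithmetic example (illustrative only, not part of this header). Assume
// Element = cutlass::half_t (16 bits per element), ThreadMap::kElementsPerAccess = 8, and an
// SMEM row stride of ref.stride(0) = 128 elements. Then sizeof(AccessType) / kElementsPerAccess
// = 2 bytes per element, stride_ = 128 * 16 / 8 = 256 bytes, and a thread whose initial offset
// is (row = 24, column = 2) starts 24 * 2 + 2 * 256 = 560 bytes past the tile base, matching
// the constructor arithmetic in SharedLoadIteratorPitchLinear above. The values are made up.
static_assert(24 * (16 / 8) + 2 * (128 * 16 / 8) == 560, "worked example for the iterator's initial byte offset");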
cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator_pitch_linear.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator_pitch_linear.h", "repo_id": "cutlass", "token_count": 2331 }
27
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cstdint> #else #include <cstdint> #include <cmath> #include <type_traits> #endif #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/uint128.h" #include "cutlass/coord.h" #include "cutlass/half.h" /** * \file * \brief Math utilities */ namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> CUTLASS_HOST_DEVICE void swap(T &lhs, T &rhs) { T tmp = lhs; lhs = rhs; rhs = tmp; } /****************************************************************************** * Static math utilities ******************************************************************************/ /// Mixed precision dot product template <typename Index, typename LongIndex, int N> CUTLASS_HOST_DEVICE LongIndex dot( Coord<N, Index> const &coord, Coord<N, LongIndex> const &stride, LongIndex acc = LongIndex()) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < N; ++n) { acc += LongIndex(coord[n]) * stride[n]; } return acc; } /** * Statically determine if N is a power-of-two */ template <int N> struct is_pow2 { static bool const value = ((N & (N - 1)) == 0); }; /** * Statically determine log2(N), rounded down */ template <int N, int CurrentVal = N, int Count = 0> struct log2_down { /// Static logarithm value enum { value = log2_down<N, (CurrentVal >> 1), Count + 1>::value }; }; // Base case template <int N, int Count> struct log2_down<N, 1, Count> { enum { value = Count }; }; /** * Statically determine log2(N), rounded up */ template <int N, int CurrentVal = N, int Count = 0> struct log2_up { /// Static logarithm value enum { value = log2_up<N, (CurrentVal >> 1), Count + 1>::value }; }; // Base case template <int N, 
int Count> struct log2_up<N, 1, Count> { enum { value = ((1 << Count) < N) ? Count + 1 : Count }; }; /** * Statically estimate sqrt(N) to the nearest power-of-two */ template <int N> struct sqrt_est { enum { value = 1 << (log2_up<N>::value / 2) }; }; /** * For performing a constant-division with a compile-time assertion that the * Divisor evenly-divides the Dividend. */ template <int Dividend, int Divisor> struct divide_assert { enum { value = Dividend / Divisor }; static_assert((Dividend % Divisor == 0), "Not an even multiple"); }; /****************************************************************************** * Rounding ******************************************************************************/ /** * Round dividend up to the nearest multiple of divisor */ template <typename dividend_t, typename divisor_t> CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 dividend_t round_nearest(dividend_t dividend, divisor_t divisor) { return ((dividend + divisor - 1) / divisor) * divisor; } template <typename value_t> CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 value_t abs_for_integer(value_t a) { return ((a > 0) ? a : -a); } /** * Greatest common divisor */ template <typename value_t> CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 value_t gcd(value_t a, value_t b) { for (;;) { if (a == 0) return cutlass::abs_for_integer(b); b %= a; if (b == 0) return cutlass::abs_for_integer(a); a %= b; } } /** * Least common multiple */ template <typename value_t> CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 value_t lcm(value_t a, value_t b) { value_t temp = cutlass::gcd(a, b); return (temp != 0) ? value_t(cutlass::abs_for_integer(a) / temp * cutlass::abs_for_integer(b)) : value_t{}; } /** * Greatest common divisor */ template <typename value_t> CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 value_t gcd_cxx11(value_t a, value_t b) { return (a == 0 || b == 0) ? cutlass::abs_for_integer(a | b) : cutlass::gcd_cxx11(b, a % b); } /** * Least common multiple */ template <typename value_t> CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 value_t lcm_cxx11(value_t a, value_t b) { return cutlass::gcd_cxx11(a, b) ? (cutlass::abs_for_integer(a) / cutlass::gcd_cxx11(a, b) * cutlass::abs_for_integer(b)) : value_t{}; } /// Returns the smallest value in the half-open range [a, a+b) that is a multiple of b CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 int round_up(int a, int b) { return ((a + b - 1) / b) * b; } /// Returns the ceiling of (a / b) CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 int ceil_div(int a, int b) { return (a + b - 1) / b; } ///////////////////////////////////////////////////////////////////////////////////////////////// /** * log2 computation, what's the * difference between the below codes and * log2_up/down codes? */ template <typename value_t> CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 value_t clz(value_t x) { for (int i = 31; i >= 0; --i) { if ((1 << i) & x) return value_t(31 - i); } return value_t(32); } template <typename value_t> CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 value_t find_log2(value_t x) { int a = int(31 - clz(x)); a += (x & (x - 1)) != 0; // Round up, add 1 if not a power of 2. 
return a; } /** * Find divisor, using find_log2 */ CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 void find_divisor(unsigned int& mul, unsigned int& shr, unsigned int denom) { if (denom == 1) { mul = 0; shr = 0; } else { unsigned int p = 31 + find_log2(denom); unsigned m = unsigned(((1ull << p) + unsigned(denom) - 1) / unsigned(denom)); mul = m; shr = p - 32; } } /** * Find quotient and remainder using device-side intrinsics */ CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 void fast_divmod(int& quo, int& rem, int src, int div, unsigned int mul, unsigned int shr) { #if defined(__CUDA_ARCH__) // Use IMUL.HI if div != 1, else simply copy the source. quo = (div != 1) ? __umulhi(src, mul) >> shr : src; #else quo = int((div != 1) ? int(((int64_t)src * mul) >> 32) >> shr : src); #endif // The remainder. rem = src - (quo * div); } // For long int input CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 void fast_divmod(int& quo, int64_t& rem, int64_t src, int div, unsigned int mul, unsigned int shr) { #if defined(__CUDA_ARCH__) // Use IMUL.HI if div != 1, else simply copy the source. quo = (div != 1) ? __umulhi(src, mul) >> shr : src; #else quo = int((div != 1) ? ((src * mul) >> 32) >> shr : src); #endif // The remainder. rem = src - (quo * div); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Object to encapsulate the fast division+modulus operation. /// /// This object precomputes two values used to accelerate the computation and is best used /// when the divisor is a grid-invariant. In this case, it may be computed in host code and /// marshalled along other kernel arguments using the 'Params' pattern. /// /// Example: /// /// /// int quotient, remainder, dividend, divisor; /// /// FastDivmod divmod(divisor); /// /// divmod(quotient, remainder, dividend); /// /// // quotient = (dividend / divisor) /// // remainder = (dividend % divisor) /// struct FastDivmod { int divisor; unsigned int multiplier; unsigned int shift_right; /// Find quotient and remainder using device-side intrinsics CUTLASS_HOST_DEVICE void fast_divmod(int& quotient, int& remainder, int dividend) const { #if defined(__CUDA_ARCH__) // Use IMUL.HI if divisor != 1, else simply copy the source. quotient = (divisor != 1) ? __umulhi(dividend, multiplier) >> shift_right : dividend; #else quotient = int((divisor != 1) ? int(((int64_t)dividend * multiplier) >> 32) >> shift_right : dividend); #endif // The remainder. remainder = dividend - (quotient * divisor); } /// For long int input CUTLASS_HOST_DEVICE void fast_divmod(int& quotient, int64_t& remainder, int64_t dividend) const { #if defined(__CUDA_ARCH__) // Use IMUL.HI if divisor != 1, else simply copy the source. quotient = (divisor != 1) ? __umulhi(dividend, multiplier) >> shift_right : dividend; #else quotient = int((divisor != 1) ? ((dividend * multiplier) >> 32) >> shift_right : dividend); #endif // The remainder. remainder = dividend - (quotient * divisor); } /// Construct the FastDivmod object, in host code ideally. /// /// This precomputes some values based on the divisor and is computationally expensive. 
CUTLASS_HOST_DEVICE FastDivmod(): divisor(0), multiplier(0), shift_right(0) { } CUTLASS_HOST_DEVICE FastDivmod(int divisor): divisor(divisor) { if (divisor != 1) { unsigned int p = 31 + find_log2(divisor); unsigned m = unsigned(((1ull << p) + unsigned(divisor) - 1) / unsigned(divisor)); multiplier = m; shift_right = p - 32; } else { multiplier = 0; shift_right = 0; } } /// Computes integer division and modulus using precomputed values. This is computationally /// inexpensive. CUTLASS_HOST_DEVICE void operator()(int &quotient, int &remainder, int dividend) const { fast_divmod(quotient, remainder, dividend); } /// Computes integer division using precomputed values. This is computationally /// inexpensive. CUTLASS_HOST_DEVICE int div(int dividend) const { int quotient, remainder; fast_divmod(quotient, remainder, dividend); return quotient; } /// Alias for `div` to match the interface of FastDivmodU64 CUTLASS_HOST_DEVICE int divide(int dividend) const { return div(dividend); } /// Computes integer division and modulus using precomputed values. This is computationally /// inexpensive. /// /// Simply returns the quotient CUTLASS_HOST_DEVICE int divmod(int &remainder, int dividend) const { int quotient; fast_divmod(quotient, remainder, dividend); return quotient; } /// Computes integer division and modulus using precomputed values. This is computationally /// inexpensive. CUTLASS_HOST_DEVICE void operator()(int &quotient, int64_t &remainder, int64_t dividend) const { fast_divmod(quotient, remainder, dividend); } /// Computes integer division and modulus using precomputed values. This is computationally /// inexpensive. CUTLASS_HOST_DEVICE int divmod(int64_t &remainder, int64_t dividend) const { int quotient; fast_divmod(quotient, remainder, dividend); return quotient; } /// Returns the divisor when cast to integer CUTLASS_HOST_DEVICE operator int() const { return divisor; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Object to encapsulate the fast division+modulus operation for 64b integer division. /// /// This object precomputes two values used to accelerate the computation and is best used /// when the divisor is a grid-invariant. In this case, it may be computed in host code and /// marshalled along other kernel arguments using the 'Params' pattern. /// /// Example: /// /// /// uint64_t quotient, remainder, dividend, divisor; /// /// FastDivmodU64 divmod(divisor); /// /// divmod(quotient, remainder, dividend); /// /// // quotient = (dividend / divisor) /// // remainder = (dividend % divisor) /// struct FastDivmodU64 { uint64_t divisor; uint64_t multiplier; unsigned int shift_right; unsigned int round_up; // // Static methods // /// Computes b, where 2^b is the greatest power of two that is less than or equal to x CUTLASS_HOST_DEVICE static uint32_t integer_log2(uint64_t x) { uint32_t n = 0; while (x >>= 1) { ++n; } return n; } /// Default ctor CUTLASS_HOST_DEVICE FastDivmodU64(): divisor(0), multiplier(0), shift_right(0), round_up(0) { } /// Construct the FastDivmod object, in host code ideally. /// /// This precomputes some values based on the divisor and is computationally expensive. 
CUTLASS_HOST_DEVICE FastDivmodU64(uint64_t divisor_): divisor(divisor_), multiplier(1), shift_right(0), round_up(0) { if (divisor) { shift_right = integer_log2(divisor); if ((divisor & (divisor - 1)) == 0) { multiplier = 0; } else { uint64_t power_of_two = (uint64_t(1) << shift_right); uint64_t multiplier_lo = uint128_t(0, power_of_two) / divisor; multiplier = uint128_t(power_of_two, power_of_two) / divisor; round_up = (multiplier_lo == multiplier ? 1 : 0); } } } /// Returns the quotient of floor(dividend / divisor) CUTLASS_HOST_DEVICE uint64_t divide(uint64_t dividend) const { uint64_t quotient = 0; #ifdef __CUDA_ARCH__ uint64_t x = dividend; if (multiplier) { x = __umul64hi(dividend + round_up, multiplier); } quotient = (x >> shift_right); #else quotient = dividend / divisor; #endif return quotient; } /// Computes the remainder given a computed quotient and dividend CUTLASS_HOST_DEVICE uint64_t modulus(uint64_t quotient, uint64_t dividend) const { return uint32_t(dividend - quotient * divisor); } /// Returns the quotient of floor(dividend / divisor) and computes the remainder CUTLASS_HOST_DEVICE uint64_t divmod(uint64_t &remainder, uint64_t dividend) const { uint64_t quotient = divide(dividend); remainder = modulus(quotient, dividend); return quotient; } /// Computes integer division and modulus using precomputed values. This is computationally /// inexpensive. CUTLASS_HOST_DEVICE void operator()(uint64_t &quotient, uint64_t &remainder, uint64_t dividend) const { quotient = divmod(remainder, dividend); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Object to encapsulate the fast division+modulus operation for 64b integer division /// in which the divisor is a power of two. struct FastDivmodU64Pow2 { uint64_t divisor; unsigned int shift_right; /// Default ctor CUTLASS_HOST_DEVICE FastDivmodU64Pow2(): divisor(0), shift_right(0) { } /// Construct the FastDivmod object, in host code ideally. /// /// This precomputes some values based on the divisor and is computationally expensive. CUTLASS_HOST_DEVICE FastDivmodU64Pow2(uint64_t divisor_): divisor(divisor_), shift_right(FastDivmodU64::integer_log2(divisor_)) { } /// Returns the quotient of floor(dividend / divisor) CUTLASS_HOST_DEVICE uint64_t divide(uint64_t dividend) const { return dividend >> shift_right; } /// Computes the remainder given a computed quotient and dividend CUTLASS_HOST_DEVICE uint64_t modulus(uint64_t dividend) const { // See https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#division-modulo-operations return dividend & (divisor - 1); } /// Returns the quotient of floor(dividend / divisor) and computes the remainder CUTLASS_HOST_DEVICE uint64_t divmod(uint64_t &remainder, uint64_t dividend) const { uint64_t quotient = divide(dividend); remainder = modulus(dividend); return quotient; } /// Computes integer division and modulus using precomputed values. This is computationally /// inexpensive. CUTLASS_HOST_DEVICE void operator()(uint64_t &quotient, uint64_t &remainder, uint64_t dividend) const { quotient = divmod(remainder, dividend); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Computes the coordinate decomposition from a linear index (64-bit linear index => coord<int32_t>) /// /// This decomposition is accelerated by the FastDivmodU64 object. It is assumed that /// a coordinate of <Rank> indices can be decomposed by <Rank - 1> div/mod operations. 
/// Note, is assumed that element divmod[0] divides by extent[1]. /// /// For example, assume 4-D coordinate (n, p, q, c) is mapped to a linear index `npqc`. This /// can be decomposed via three divide and modulus operations: /// /// c = npqc % C; | divmod[2] = FastDivmodU64(C) /// npq = npqc / C; | coord[3] = c /// /// q = npq % Q; | divmod[1] = FastDivmodU64(Q) /// np = npq / Q; | coord[2] = q /// /// p = np % P; | divmod[0] = FastDivmodU64(P) /// n = np / P; | coord[1] = p /// /// | coord[0] = n /// template <int Rank> CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecomposition( uint64_t linear_idx, ///< Linear index to decompose FastDivmodU64 const *divmod) { ///< Pointer to array of Rank-1 FastDivmodU64 objects static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater."); Coord<Rank> coord; CUTLASS_PRAGMA_UNROLL for (int i = Rank; i > 1; --i) { uint64_t remainder; linear_idx = divmod[i - 2].divmod(remainder, linear_idx); coord[i - 1] = int(remainder); } coord[0] = int(linear_idx); return coord; } /// Computes the coordinate decomposition from a linear index (32-bit linear index => coord<int32_t>) template <int Rank> CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecomposition( int linear_idx, ///< Linear index to decompose FastDivmod const *divmod) { ///< Pointer to array of Rank-1 FastDivmodU64 objects static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater."); Coord<Rank> coord; CUTLASS_PRAGMA_UNROLL for (int i = Rank; i > 1; --i) { int remainder; linear_idx = divmod[i - 2].divmod(remainder, linear_idx); coord[i - 1] = int(remainder); } coord[0] = int(linear_idx); return coord; } template <int Rank> CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecompositionLittleEndian( uint64_t linear_idx, ///< Linear index to decompose FastDivmodU64 const *divmod) { ///< Pointer to array of Rank-1 FastDivmodU64 objects static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater."); Coord<Rank> coord; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Rank - 1; ++i) { uint64_t remainder; linear_idx = divmod[i].divmod(remainder, linear_idx); coord[i] = int(remainder); } coord[Rank - 1] = int(linear_idx); return coord; } /// Computes the coordinate decomposition from a linear index (32-bit linear index => coord<int32_t>) template <int Rank> CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecompositionLittleEndian( int linear_idx, ///< Linear index to decompose FastDivmod const *divmod) { ///< Pointer to array of Rank-1 FastDivmodU64 objects static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater."); Coord<Rank> coord; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Rank - 1; ++i) { int remainder; linear_idx = divmod[i].divmod(remainder, linear_idx); coord[i] = int(remainder); } coord[Rank - 1] = int(linear_idx); return coord; } /// Safely computes the offset of a linear index in bytes for all types template <typename Element> CUTLASS_HOST_DEVICE int64_t OffsetBytes(int64_t index) { static_assert( (sizeof_bits<Element>::value >= 8 && !(sizeof_bits<Element>::value % 8)) || (sizeof_bits<Element>::value < 8 && !(8 % sizeof_bits<Element>::value)), "Size of numeric type in bits must either be divisible by 8 bits, or 8 bits must be divisible by the size."); if (sizeof_bits<Element>::value >= 8) { return index * (sizeof_bits<Element>::value / 8); } else { int const kElementsPerByte = ((8 / sizeof_bits<Element>::value) + ((sizeof_bits<Element>::value >= 8) ? 
1 : 0)); return index / kElementsPerByte; } } CUTLASS_HOST_DEVICE int64_t OffsetBytes(int64_t index, int64_t element_sizeof_bits) { if (element_sizeof_bits >= 8) { return index * (element_sizeof_bits / 8); } else { int64_t const kElementsPerByte = ((8 / element_sizeof_bits) + ((element_sizeof_bits >= 8) ? 1 : 0)); return index / kElementsPerByte; } } ///////////////////////////////////////////////////////////////////////////////////////////////// // Min/Max ///////////////////////////////////////////////////////////////////////////////////////////////// template <int A, int B> struct Min { static int const kValue = (A < B) ? A : B; }; template <int A, int B> struct Max { static int const kValue = (A > B) ? A : B; }; CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 int const_min(int a, int b) { return (b < a ? b : a); } CUTLASS_HOST_DEVICE CUTLASS_CONSTEXPR_IF_CXX17 int const_max(int a, int b) { return (b > a ? b : a); } template <typename T> CUTLASS_HOST_DEVICE T fast_min(T a, T b) { return (b < a ? b : a); } template <> CUTLASS_HOST_DEVICE float fast_min(float a, float b) { return fminf(a, b); } template <typename T> CUTLASS_HOST_DEVICE T fast_max(T a, T b) { return (a < b ? b : a); } template <> CUTLASS_HOST_DEVICE float fast_max(float a, float b) { return fmaxf(a, b); } CUTLASS_HOST_DEVICE float fast_cos(float theta) { #if defined(__CUDA_ARCH__) return ::cosf(theta); #else return std::cos(theta); #endif } CUTLASS_HOST_DEVICE double fast_cos(double theta) { #if defined(__CUDA_ARCH__) return ::cos(theta); #else return std::cos(theta); #endif } CUTLASS_HOST_DEVICE float fast_sin(float theta) { #if defined(__CUDA_ARCH__) return ::sinf(theta); #else return std::sin(theta); #endif } CUTLASS_HOST_DEVICE double fast_sin(double theta) { #if defined(__CUDA_ARCH__) return ::sin(theta); #else return std::sin(theta); #endif } CUTLASS_HOST_DEVICE float fast_acos(float theta) { #if defined(__CUDA_ARCH__) return ::acosf(theta); #else return std::acos(theta); #endif } CUTLASS_HOST_DEVICE double fast_acos(double theta) { #if defined(__CUDA_ARCH__) return ::acos(theta); #else return std::acos(theta); #endif } CUTLASS_HOST_DEVICE float fast_asin(float theta) { #if defined(__CUDA_ARCH__) return ::asinf(theta); #else return std::asin(theta); #endif } CUTLASS_HOST_DEVICE double fast_asin(double theta) { #if defined(__CUDA_ARCH__) return ::asin(theta); #else return std::asin(theta); #endif } CUTLASS_HOST_DEVICE float fast_sqrt(float theta) { #if defined(__CUDA_ARCH__) return ::sqrtf(theta); #else return std::sqrt(theta); #endif } CUTLASS_HOST_DEVICE double fast_sqrt(double theta) { #if defined(__CUDA_ARCH__) return ::sqrt(theta); #else return std::sqrt(theta); #endif } CUTLASS_HOST_DEVICE float fast_exp(float x) { #if defined(__CUDA_ARCH__) return ::expf(x); #else return std::exp(x); #endif } CUTLASS_HOST_DEVICE double fast_exp(double x) { #if defined(__CUDA_ARCH__) return ::exp(x); #else return std::exp(x); #endif } CUTLASS_HOST_DEVICE half_t fast_exp(half_t x) { #if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 10) && (__CUDA_ARCH__ >= 750) return (half_t)(::hexp(x.to_half())); #else return (half_t)(fast_exp(float(x))); #endif } CUTLASS_HOST_DEVICE float fast_log(float x) { #if defined(__CUDA_ARCH__) return ::logf(x); #else return std::log(x); #endif } CUTLASS_HOST_DEVICE double fast_log(double x) { #if defined(__CUDA_ARCH__) return ::log(x); #else return std::log(x); #endif } CUTLASS_HOST_DEVICE float fast_tanh(float x) { #if defined(__CUDA_ARCH__) #if (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 
750) float y; asm volatile ( "tanh.approx.f32 %0, %1; " : "=f"(y) : "f"(x)); return y; #else return ::tanhf(x); #endif #else return std::tanh(x); #endif } CUTLASS_HOST_DEVICE double fast_tanh(double x) { #if defined(__CUDA_ARCH__) return ::tanh(x); #else return std::tanh(x); #endif } CUTLASS_HOST_DEVICE half_t fast_tanh(half_t x) { #if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 750) asm volatile ( "tanh.approx.f16 %0, %1;" : "=h"(x.raw()) : "h"(x.raw())); return x; #else return half_t(fast_tanh(float(x))); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct fast_exp_op { CUTLASS_HOST_DEVICE T operator()(T const &rhs) const { return fast_exp(rhs); } }; #if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 10) && (__CUDA_ARCH__ >= 750) template <int N> struct fast_exp_op<Array<half_t, N>> { CUTLASS_DEVICE Array<half_t, N> operator()(Array<half_t, N> const &rhs) const { Array<half_t, N> result; // use x2 specialization __half2 const *in = reinterpret_cast<__half2 const *>(&rhs); __half2 *out = reinterpret_cast<__half2 *>(&result); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { out[i] = ::h2exp(in[i]); } // residual if (N % 2) { half_t last = rhs[N - 1]; result[N - 1] = half_t(::hexp(last.to_half())); } return result; } }; #endif // #if defined(__CUDA_ARCH__) template <typename T, int N> struct fast_exp_op<Array<T, N>> { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &rhs) const { fast_exp_op<T> fast_op; Array<T, N> y; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = fast_op(rhs[i]); } return y; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct fast_tanh_op { CUTLASS_HOST_DEVICE T operator()(T const &rhs) const { return fast_tanh(rhs); } }; #if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 750) template <int N> struct fast_tanh_op<Array<half_t, N>> { CUTLASS_DEVICE Array<half_t, N> operator()(Array<half_t, N> const &rhs) const { Array<half_t, N> result; // use x2 specialization uint32_t const *in = reinterpret_cast<uint32_t const *>(&rhs); uint32_t *out = reinterpret_cast<uint32_t *>(&result); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { asm volatile ("tanh.approx.f16x2 %0, %1;" : "=r"(out[i]) : "r"(in[i])); } // residual if (N % 2) { uint16_t const *in = reinterpret_cast<uint16_t const *>(&rhs); uint16_t *out = reinterpret_cast<uint16_t *>(&result); asm volatile ("tanh.approx.f16 %0, %1;" : "=h"(out[N - 1]) : "h"(in[N - 1])); } return result; } }; #endif // #if defined(__CUDA_ARCH__) template <typename T, int N> struct fast_tanh_op<Array<T, N>> { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &rhs) const { fast_tanh_op<T> fast_op; Array<T, N> y; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = fast_op(rhs[i]); } return y; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Absolute value function template <typename T> CUTLASS_HOST_DEVICE T absolute_value(T x) { if (x < T()) { return -x; } return x; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
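// Usage sketch (illustrative only, not part of fast_math.h): FastDivmod is intended to be
// constructed once, ideally on the host, for a grid-invariant divisor and then reused so that
// each division/modulus costs a multiply and shift instead of an integer divide. The function
// name and the values below are made up for the example.
inline int fast_divmod_example() {
  cutlass::FastDivmod divmod(3);     // precomputes multiplier and shift for divisor 3
  int quotient, remainder;
  divmod(quotient, remainder, 10);   // quotient = 3, remainder = 1
  return quotient * 3 + remainder;   // reconstructs the dividend: 10
}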
cutlass/include/cutlass/fast_math.h/0
{ "file_path": "cutlass/include/cutlass/fast_math.h", "repo_id": "cutlass", "token_count": 11015 }
28
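// Usage sketch for CoordinateDecomposition from fast_math.h above (illustrative only, not part
// of the original sources). A 4-D (n, p, q, c) coordinate with extents P = 3, Q = 4, C = 5 is
// recovered from its linear index using Rank - 1 = 3 precomputed FastDivmodU64 objects; the
// leading extent N is never divided by. The function name and values are made up.
#include "cutlass/fast_math.h"
inline cutlass::Coord<4> coordinate_decomposition_example() {
  cutlass::FastDivmodU64 divmod[3] = {
    cutlass::FastDivmodU64(3),   // divides by P
    cutlass::FastDivmodU64(4),   // divides by Q
    cutlass::FastDivmodU64(5)    // divides by C
  };
  // (n, p, q, c) = (1, 2, 3, 4)  =>  linear index = ((1*3 + 2)*4 + 3)*5 + 4 = 119
  return cutlass::CoordinateDecomposition<4>(uint64_t(119), divmod);  // -> (1, 2, 3, 4)
}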
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/numeric_types.h" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/trace.h" #include "cute/arch/cluster_sm90.hpp" #include "cute/arch/copy_sm90.hpp" #include "cute/algorithm/functional.hpp" #include "cute/atom/mma_atom.hpp" #include "cute/algorithm/gemm.hpp" #include "cute/tensor_predicate.hpp" #include "cute/numeric/arithmetic_tuple.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { using namespace cute; ///////////////////////////////////////////////////////////////////////////////////////////////// template < int Stages, class ClusterShape, int PipelineAsyncMmaStages, class TileShape_, class ElementA_, class StrideA_, class ElementB_, class StrideB_, class TiledMma_, class GmemTiledCopyA_, class SmemLayoutAtomA_, class SmemCopyAtomA_, class TransformA_, class GmemTiledCopyB_, class SmemLayoutAtomB_, class SmemCopyAtomB_, class TransformB_> struct CollectiveMma< MainloopSm90TmaGmma<Stages, ClusterShape, PipelineAsyncMmaStages>, TileShape_, ElementA_, StrideA_, ElementB_, StrideB_, TiledMma_, GmemTiledCopyA_, SmemLayoutAtomA_, SmemCopyAtomA_, TransformA_, GmemTiledCopyB_, SmemLayoutAtomB_, SmemCopyAtomB_, TransformB_> { // // Type Aliases // using DispatchPolicy = MainloopSm90TmaGmma<Stages, ClusterShape, PipelineAsyncMmaStages>; using TileShape = TileShape_; using ElementA = ElementA_; using StrideA = StrideA_; using ElementB = ElementB_; using StrideB = StrideB_; using TiledMma = TiledMma_; using ElementAccumulator = typename TiledMma::ValTypeC; using GmemTiledCopyA = GmemTiledCopyA_; using GmemTiledCopyB = GmemTiledCopyB_; 
using SmemLayoutAtomA = SmemLayoutAtomA_; using SmemLayoutAtomB = SmemLayoutAtomB_; using SmemCopyAtomA = SmemCopyAtomA_; using SmemCopyAtomB = SmemCopyAtomB_; using TransformA = TransformA_; using TransformB = TransformB_; using ArchTag = typename DispatchPolicy::ArchTag; using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{})); using MainloopPipeline = cutlass::PipelineTmaAsync<DispatchPolicy::Stages>; using PipelineParams = typename MainloopPipeline::Params; using PipelineState = typename cutlass::PipelineState<DispatchPolicy::Stages>; static constexpr int ThreadCount = CUTE_STATIC_V(size(TiledMma{})); static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); // Tile along modes in a way that maximizes the TMA box size. using SmemLayoutA = decltype(tile_to_shape( SmemLayoutAtomA{}, make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}), cute::conditional_t< ::cutlass::gemm::detail::is_major<0,StrideA>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{})); using SmemLayoutB = decltype(tile_to_shape( SmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}), cute::conditional_t< ::cutlass::gemm::detail::is_major<0,StrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{})); static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more."); static_assert(cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value && cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value, "MMA atom must source both A and B operand from smem_desc for this mainloop."); static_assert(cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>, "GmemTiledCopy - invalid SM90 TMA copy atom specified."); static_assert(cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>, "GmemTiledCopy - invalid SM90 TMA copy atom specified."); // TMA converts f32 input to tf32 when copying from GMEM to SMEM // For all other types, cast to size equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>; static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>; using InternalElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>; using InternalElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>; struct SharedStorage { cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>> smem_A; cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>> smem_B; using PipelineStorage = typename MainloopPipeline::SharedStorage; alignas(16) PipelineStorage pipeline_storage; }; // Host side kernel arguments struct Arguments { ElementA const* ptr_A; StrideA dA; ElementB const* ptr_B; StrideB dB; uint32_t mma_promotion_interval = 4; }; // Device side kernel params struct Params { // Assumption: StrideA is congruent with Problem_MK using TMA_A = decltype(make_tma_copy( GmemTiledCopyA{}, make_tensor(static_cast<InternalElementA const*>(nullptr), repeat_like(StrideA{}, int32_t(0)), StrideA{}), SmemLayoutA{}(_,_,0), make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})), size<1>(ClusterShape{}))); // mcast along N mode for this M load, if any // Assumption: StrideB is congruent with Problem_NK using TMA_B = decltype(make_tma_copy( GmemTiledCopyB{}, make_tensor(static_cast<InternalElementB const*>(nullptr), repeat_like(StrideB{}, int32_t(0)), StrideB{}), SmemLayoutB{}(_,_,0), make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})), size<0>(ClusterShape{}))); // mcast along M mode for this N load, if any TMA_A tma_load_a; TMA_B tma_load_b; }; // // Methods // template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { (void) workspace; // Optionally append 1s until problem shape is rank-4 (MNKL), in case it is only rank-3 (MNK) auto problem_shape_MNKL = append<4>(problem_shape, 1); auto [M,N,K,L] = problem_shape_MNKL; auto ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_A); auto ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_B); Tensor tensor_a = make_tensor(ptr_A, make_layout(make_shape(M,K,L), args.dA)); Tensor tensor_b = make_tensor(ptr_B, make_layout(make_shape(N,K,L), args.dB)); typename Params::TMA_A tma_load_a = make_tma_copy( GmemTiledCopyA{}, tensor_a, SmemLayoutA{}(_,_,cute::Int<0>{}), make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})), size<1>(ClusterShape{})); // mcast along N mode for this M load, if any typename Params::TMA_B tma_load_b = make_tma_copy( GmemTiledCopyB{}, tensor_b, SmemLayoutB{}(_,_,cute::Int<0>{}), make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})), size<0>(ClusterShape{})); // mcast along M mode for this N load, if any return { tma_load_a, tma_load_b }; } template<class ProblemShape> CUTLASS_HOST_DEVICE static bool can_implement( ProblemShape const& problem_shape, [[maybe_unused]] Arguments const& args) { constexpr int tma_alignment_bits = 128; auto problem_shape_MNKL = append<4>(problem_shape, 1); auto [M,N,K,L] = problem_shape_MNKL; bool implementable = true; constexpr int min_tma_aligned_elements_A = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value; implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_A>(cute::make_shape(M,K,L), StrideA{}); constexpr int min_tma_aligned_elements_B = tma_alignment_bits / cutlass::sizeof_bits<ElementB>::value; implementable = implementable && 
cutlass::detail::check_alignment<min_tma_aligned_elements_B>(cute::make_shape(N,K,L), StrideB{}); if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n"); } return implementable; } /// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance CUTLASS_DEVICE static void prefetch_tma_descriptors(Params const& mainloop_params) { cute::prefetch_tma_descriptor(mainloop_params.tma_load_a.get_tma_descriptor()); cute::prefetch_tma_descriptor(mainloop_params.tma_load_b.get_tma_descriptor()); } /// Perform a collective-scoped matrix multiply-accumulate /// Producer Perspective template < class TensorA, class TMA_LOAD_A, class TensorB, class TMA_LOAD_B, class FrgTensorC, class KTileIterator > CUTLASS_DEVICE void operator() ( TensorA const& gA, TMA_LOAD_A& tma_load_a, TensorB const& gB, TMA_LOAD_B& tma_load_b, FrgTensorC& accum, KTileIterator k_tile_iter, int k_tile_count, int thread_idx, uint32_t block_rank_in_cluster, char* shared_memory, Params const& mainloop_params) { using namespace cute; static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident."); static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2."); static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2."); static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3."); static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3."); static_assert(cute::is_void_v<SmemCopyAtomA>, "SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions."); static_assert(cute::is_void_v<SmemCopyAtomB>, "SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions."); SharedStorage& storage = *reinterpret_cast<SharedStorage*>(shared_memory); Tensor sA = make_tensor(make_smem_ptr(storage.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sB = make_tensor(make_smem_ptr(storage.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) // // Prepare the TMA loads for A and B // constexpr uint32_t cluster_shape_x = get<0>(ClusterShape()); uint2 cluster_local_block_id = {block_rank_in_cluster % cluster_shape_x, block_rank_in_cluster / cluster_shape_x}; auto block_tma_a = tma_load_a.get_slice(cluster_local_block_id.y); auto block_tma_b = tma_load_b.get_slice(cluster_local_block_id.x); // Applies the mapping from block_tma_a Tensor tAgA = block_tma_a.partition_S(gA); // (TMA,TMA_M,TMA_K,k) Tensor tAsA = block_tma_a.partition_D(sA); // (TMA,TMA_M,TMA_K,PIPE) Tensor tBgB = block_tma_b.partition_S(gB); // (TMA,TMA_N,TMA_K,k) Tensor tBsB = block_tma_b.partition_D(sB); // (TMA,TMA_N,TMA_K,PIPE) // // Prepare TMA membars and PREFETCH // // Number of pipelined k-tiles in smem constexpr int K_PIPE_MAX = DispatchPolicy::Stages; // NOTE: Another parameter: Partition the pipeline between active MMAs and active TMAs // Tunable via the dispatch policy to tollerate latencies evenly across the math and compute stages // K_PIPE_MMAS: The max number of active MMA pipes at beginning of every loop // K_PIPE_TMAS: The max number of active TMA pipes at beginning of every loop (geq 1) constexpr int K_PIPE_MMAS = DispatchPolicy::PipelineAsyncMmaStages; constexpr int K_PIPE_TMAS = K_PIPE_MAX - K_PIPE_MMAS; static_assert(0 <= K_PIPE_MMAS && K_PIPE_MMAS < K_PIPE_MAX); static_assert(0 < K_PIPE_TMAS && K_PIPE_TMAS <= K_PIPE_MAX); static_assert(K_PIPE_MMAS < K_PIPE_MAX - 1); // Set the bytes transferred in this TMA transaction (may involve 
multiple issues) constexpr uint32_t TmaTransactionBytes = static_cast<uint32_t>( cutlass::bits_to_bytes(size<0>(sA) * size<1>(sA) * sizeof_bits<InternalElementA>::value) + cutlass::bits_to_bytes(size<0>(sB) * size<1>(sB) * sizeof_bits<InternalElementB>::value)); // Obtain warp index int warp_idx = canonical_warp_idx_sync(); int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; PipelineParams params; params.transaction_bytes = TmaTransactionBytes; params.role = MainloopPipeline::ThreadCategory::ProducerConsumer; params.is_leader = warp_group_thread_idx == 0; params.num_consumers = NumThreadsPerWarpGroup; MainloopPipeline pipeline(storage.pipeline_storage, params, ClusterShape{}); // State variables used for iterating the circular buffer // smem_pipe_read / release is used by the consumer of SMEM data - i.e MMA // smem_pipe_write is used by the producer of SMEM data - i.e TMA PipelineState smem_pipe_read; PipelineState smem_pipe_release; PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>(); // We need this to guarantee that the Pipeline init is visible // To all producers and consumer blocks in the Cluster if constexpr (size(ClusterShape{}) > 1) { cute::cluster_arrive_relaxed(); cute::cluster_wait(); } else { __syncthreads(); } // Set predicate for the lowest lane_id in the warp int lane_predicate = cute::elect_one_sync(); uint16_t mcast_mask_a = 0; uint16_t mcast_mask_b = 0; // Keep a copy to know when to stop issuing loads int k_tile_count_tma = k_tile_count; // Issue TmaLoads (Prologue fetches) if (warp_idx == 0 && lane_predicate == 1) { // Maps the tile -> block, value if constexpr (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>) { auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id for (int n = 0; n < size<1>(block_layout); ++n) { mcast_mask_a |= (uint16_t(1) << block_layout(cluster_local_block_id.x,n,Int<0>{})); } } if constexpr (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>) { auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id for (int m = 0; m < size<0>(block_layout); ++m) { mcast_mask_b |= (uint16_t(1) << block_layout(m,cluster_local_block_id.y,Int<0>{})); } } // Issue the prologue loads int prologue_tma_count = min(K_PIPE_MAX, k_tile_count); CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < prologue_tma_count; ++stage) { pipeline.producer_acquire(smem_pipe_write); using BarrierType = typename MainloopPipeline::ProducerBarrierType; BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write); copy(tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,stage)); copy(tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,stage)); ++k_tile_iter; ++smem_pipe_write; } k_tile_count_tma -= prologue_tma_count; } // // Define C accumulators and A/B partitioning // TiledMma tiled_mma; auto thread_mma = tiled_mma.get_thread_slice(thread_idx); Tensor tCsA = thread_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE) Tensor tCsB = thread_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE) // Allocate "fragments/descriptors" Tensor tCrA = thread_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K,PIPE) Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE) CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(accum)); // M CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // 
PIPE CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tAsA)); // PIPE CUTE_STATIC_ASSERT_V(size<3>(tCsB) == size<3>(tBsB)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE __syncthreads(); tiled_mma.accumulate_ = GMMA::ScaleOut::Zero; warpgroup_fence_operand(accum); // Prologue MMAs CUTLASS_PRAGMA_UNROLL for (int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count); prologue_mma_count > 0; --prologue_mma_count) { // WAIT on smem_pipe_read until it's data is available pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) { // (V,M,K) x (V,N,K) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block,smem_pipe_read.index()), tCrB(_,_,k_block,smem_pipe_read.index()), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; } warpgroup_commit_batch(); ++smem_pipe_read; --k_tile_count; } warpgroup_fence_operand(accum); // // PIPELINED MAIN LOOP // CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > 0; --k_tile_count) { // WAIT on smem_pipe_read until data is available pipeline.consumer_wait(smem_pipe_read); // // Compute on k_tile // warpgroup_fence_operand(accum); warpgroup_arrive(); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) { // (V,M,K) x (V,N,K) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block,smem_pipe_read.index()), tCrB(_,_,k_block,smem_pipe_read.index()), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; } warpgroup_commit_batch(); /// Wait on the GMMA barrier for K_PIPE_MMAS (or fewer) outstanding to ensure smem_pipe_write is consumed warpgroup_wait<K_PIPE_MMAS>(); warpgroup_fence_operand(accum); pipeline.consumer_release(smem_pipe_release); // UNLOCK wr stage, done _computing_ on it // // Copy gmem to smem for *k_tile_iter // // Do Acquire & Load only if needed - helps with both performance and also corner case illegal barrier-ops if (warp_idx == 0 && lane_predicate == 1 && (k_tile_count_tma > 0) ) { pipeline.producer_acquire(smem_pipe_write); // LOCK wr stage, for _writing_ using BarrierType = typename MainloopPipeline::ProducerBarrierType; BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write); copy(tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write.index())); copy(tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write.index())); ++smem_pipe_write; ++k_tile_iter; --k_tile_count_tma; } // Advance consumer pipeline ++smem_pipe_read; ++smem_pipe_release; } // Wait on all GMMAs warpgroup_wait<0>(); warpgroup_fence_operand(accum); // Workaround for ensuring Smem destruction doesn't happen accidentally if constexpr (size(typename DispatchPolicy::ClusterShape{}) > 1) { cute::cluster_arrive(); cute::cluster_wait(); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::collective /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss.hpp/0
{ "file_path": "cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss.hpp", "repo_id": "cutlass", "token_count": 9432 }
29
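For readers unfamiliar with the tile_to_shape idiom used to build SmemLayoutA/SmemLayoutB in the mainloop above, the following standalone sketch reproduces that construction outside the collective. The atom choice (K-major, 128B swizzle, half_t), the tile extents (128x64) and the stage count (3) are illustrative assumptions, not values required by that header; it is intended to compile as an ordinary nvcc host program against the CUTLASS/CuTe headers.

// Hedged sketch: standalone inspection of a GMMA shared-memory layout.
// The atom, tile size and pipeline depth below are assumptions for illustration.
#include <cstdio>
#include <cute/tensor.hpp>
#include <cute/atom/mma_traits_sm90_gmma.hpp>
#include <cutlass/numeric_types.h>

int main() {
  using namespace cute;

  // Assumed K-major shared-memory atom with 128B swizzle for half_t operands.
  using SmemAtomA = GMMA::Layout_K_SW128_Atom<cutlass::half_t>;

  // Tile the atom up to (BLK_M, BLK_K, PIPE) = (128, 64, 3).
  // Step<_1,_2,_3> keeps the K-major ordering, mirroring the non-M-major branch of SmemLayoutA.
  auto smem_layout_A = tile_to_shape(
      SmemAtomA{},
      make_shape(Int<128>{}, Int<64>{}, Int<3>{}),
      Step<_1,_2,_3>{});

  print(smem_layout_A);
  printf("\nhalf_t elements reserved per CTA for A: %d\n", int(cosize(smem_layout_A)));
  return 0;
}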
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/arch/mma.h" #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/gemm_universal.h" #include "cutlass/gemm/kernel/default_gemm_universal.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/gemm/device/gemm_universal_base.h" #include "cutlass/layout/permute.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// /*! GemmUniversal is a stateful, reusable GEMM handle. Once initialized for a given GEMM computation (problem geometry and data references), it can be reused across different GEMM problems having the geometry. (Once initialized, details regarding problem geometry and references to workspace memory cannot be updated.) The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and batched array variants. 
*/ template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassSimt, /// Tag indicating architecture to tune for. This is the minimum SM that /// supports the intended feature. The device kernel can be built /// targeting any SM larger than this number. typename ArchTag_ = arch::Sm70, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator, /// Complex elementwise transformation on A operand ComplexTransform TransformA = ComplexTransform::kNone, /// Complex elementwise transformation on B operand ComplexTransform TransformB = ComplexTransform::kNone, /// Gather operand A by using an index array bool GatherA = false, /// Gather operand B by using an index array bool GatherB = false, /// Scatter result D by using an index array bool ScatterD = false, /// Permute result D typename PermuteDLayout_ = layout::NoPermute, /// Permute operand A typename PermuteALayout_ = layout::NoPermute, /// Permute operand B typename PermuteBLayout_ = layout::NoPermute > class GemmUniversal : public GemmUniversalBase< typename kernel::DefaultGemmUniversal< ElementA_, LayoutA_, TransformA, AlignmentA, ElementB_, LayoutB_, TransformB, AlignmentB, ElementC_, LayoutC_, ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, Operator_, SharedMemoryClearOption::kNone, GatherA, GatherB, ScatterD, PermuteDLayout_, PermuteALayout_, PermuteBLayout_ >::GemmKernel > { public: using 
ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; using PermuteDLayout = PermuteDLayout_; using PermuteALayout = PermuteALayout_; using PermuteBLayout = PermuteBLayout_; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static int const kAlignmentC = EpilogueOutputOp::kCount; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Base = GemmUniversalBase< typename kernel::DefaultGemmUniversal< ElementA_, LayoutA_, TransformA, AlignmentA, ElementB_, LayoutB_, TransformB, AlignmentB, ElementC_, LayoutC_, ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, Operator_, SharedMemoryClearOption::kNone, GatherA, GatherB, ScatterD, PermuteDLayout_, PermuteALayout_, PermuteBLayout_ >::GemmKernel >; using Arguments = typename Base::Arguments; using GemmKernel = typename Base::GemmKernel; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for column-major output exchanges problem size and operand. template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Element type for internal accumulation typename ElementAccumulator_, /// Operator class tag typename OperatorClass_, /// Tag indicating architecture to tune for. This is the minimum SM that /// supports the intended feature. The device kernel can be built /// targeting any SM larger than this number. 
typename ArchTag_, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_, /// Warp-level tile size (concept: GemmShape) typename WarpShape_, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_, /// Epilogue output operator typename EpilogueOutputOp_, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_, /// Number of stages used in the pipelined mainloop int Stages, /// Access granularity of A matrix in units of elements int AlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB, /// Operation performed by GEMM typename Operator_, /// Complex elementwise transformation on A operand ComplexTransform TransformA, /// Complex elementwise transformation on B operand ComplexTransform TransformB, /// Gather operand A by using an index array bool GatherA, /// Gather operand B by using an index array bool GatherB, /// Scatter result D by using an index array bool ScatterD, /// Permute result D typename PermuteDLayout_, /// Permute operand A typename PermuteALayout_, /// Permute operand B typename PermuteBLayout_ > class GemmUniversal<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, layout::ColumnMajor, // partially specialized on LayoutC ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, Operator_, TransformA, TransformB, GatherA, GatherB, ScatterD, PermuteDLayout_, PermuteALayout_, PermuteBLayout_> { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = layout::ColumnMajor; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; using PermuteDLayout = PermuteDLayout_; using PermuteALayout = PermuteALayout_; using PermuteBLayout = PermuteBLayout_; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using UnderlyingOperator = typename GemmUniversal< ElementB, typename layout::LayoutTranspose<LayoutB>::type, ElementA, typename layout::LayoutTranspose<LayoutA>::type, ElementC, layout::RowMajor, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, kAlignmentB, kAlignmentA, Operator, kTransformB, kTransformA, GatherB, GatherA, ScatterD, PermuteDLayout, PermuteBLayout, PermuteALayout >::Base; using GemmKernel = typename UnderlyingOperator::GemmKernel; static int const kAlignmentC = EpilogueOutputOp::kCount; /// Argument structure using Arguments = typename UnderlyingOperator::Arguments; private: UnderlyingOperator underlying_operator_; public: /// Constructs the GEMM. 
GemmUniversal() { } /// Helper to construct a transposed equivalent for the underlying GEMM operator static Arguments to_underlying_arguments(Arguments const &args) { return args.transposed_problem(); } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { return UnderlyingOperator::can_implement(to_underlying_arguments(args)); } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args)); } /// Computes the grid shape static dim3 get_grid_shape(Arguments const &args) { return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args)); } /// Computes the maximum number of active blocks per multiprocessor static int maximum_active_blocks(int smem_capacity = -1) { return UnderlyingOperator::maximum_active_blocks(smem_capacity); } /// Initializes GEMM state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream); } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { return underlying_operator_.update(to_underlying_arguments(args), workspace); } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { return underlying_operator_.run(stream); } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Initializes the GEMM state from the given arguments, then runs the kernel. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/device/gemm_universal.h/0
{ "file_path": "cutlass/include/cutlass/gemm/device/gemm_universal.h", "repo_id": "cutlass", "token_count": 5204 }
30
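As a usage sketch for the device-level handle defined above: the snippet below builds an illustrative f16/f16/f32 tensor-op operator on SM80, fills the universal Arguments (mode, problem size, batch count, epilogue scalars, pointers, batch strides, leading dimensions) and runs it. Tile shapes and the epilogue are left at their DefaultGemmConfiguration defaults; the pointer/stride choices and the helper name run_gemm are assumptions for illustration, not part of this header.

// Hedged sketch: minimal host-side use of device::GemmUniversal (types and strides are assumptions).
#include <cuda_runtime.h>
#include "cutlass/gemm/device/gemm_universal.h"

// Illustrative operator: f16 inputs, f32 accumulation/output, tensor-op math on SM80.
using Gemm = cutlass::gemm::device::GemmUniversal<
    cutlass::half_t, cutlass::layout::RowMajor,      // A
    cutlass::half_t, cutlass::layout::ColumnMajor,   // B
    float,           cutlass::layout::RowMajor,      // C/D
    float,                                           // accumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80>;

cutlass::Status run_gemm(int M, int N, int K,
                         cutlass::half_t const* A, cutlass::half_t const* B,
                         float const* C, float* D,
                         float alpha, float beta, cudaStream_t stream) {
  Gemm::Arguments args(
      cutlass::gemm::GemmUniversalMode::kGemm,
      {M, N, K},
      /*batch_count=*/1,
      {alpha, beta},                                   // linear-combination epilogue: alpha*AB + beta*C
      A, B, C, D,
      // Batch strides are not used for a single, non-batched kGemm problem.
      int64_t(M) * K, int64_t(N) * K, int64_t(M) * N, int64_t(M) * N,
      /*lda,ldb,ldc,ldd=*/ K, K, N, N);

  if (Gemm::can_implement(args) != cutlass::Status::kSuccess) {
    return cutlass::Status::kErrorNotSupported;
  }
  Gemm gemm_op;
  // No workspace is needed here; split-K and parallel-reduction modes would require one.
  cutlass::Status status = gemm_op.initialize(args, /*workspace=*/nullptr, stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  return gemm_op.run(stream);
}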
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This file contains definitions and utility functions for describing problem shapes for 3.x Ptr-Array GEMMs and Grouped GEMMs. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/tensor_coord.h" #include "cute/container/array.hpp" #if ! 
defined(__CUDACC_RTC__) #include <initializer_list> #endif //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm { //////////////////////////////////////////////////////////////////////////////////////////////////// template <class ProblemShape_> struct GroupProblemShape { using UnderlyingProblemShape = ProblemShape_; int32_t num_groups = 1; UnderlyingProblemShape* problem_shapes = nullptr; UnderlyingProblemShape const* host_problem_shapes = nullptr; CUTLASS_HOST_DEVICE int32_t groups() const { return num_groups; } CUTLASS_HOST_DEVICE UnderlyingProblemShape const get_problem_shape(int32_t group_idx) const { return problem_shapes[group_idx]; } CUTLASS_HOST_DEVICE UnderlyingProblemShape const get_host_problem_shape(int32_t group_idx) const { return host_problem_shapes[group_idx]; } CUTLASS_HOST_DEVICE bool is_host_problem_shape_available() { return host_problem_shapes != nullptr; } }; template <class ProblemShape_> class ArrayProblemShape { public: using UnderlyingProblemShape = ProblemShape_; ArrayProblemShape() = default; ArrayProblemShape(UnderlyingProblemShape ps) : problem_shape_(ps) {} // Num of groups for Ptr-Array GEMM always remain one, just the number of batches (l) can vary // This is just to maintain uniformity with GroupProblemShape constexpr int32_t groups() const { return 1; } UnderlyingProblemShape* problem_shapes() const { return &problem_shape_; } UnderlyingProblemShape const* host_problem_shapes() const { return &problem_shape_; } // This is just to maintain uniformity with GroupProblemShape CUTLASS_HOST_DEVICE UnderlyingProblemShape const get_problem_shape(int32_t /* unused */ = 0) const { return problem_shape_; } CUTLASS_HOST_DEVICE UnderlyingProblemShape const get_host_problem_shape(int32_t /* unused */ = 0) const { return problem_shape_; } CUTLASS_HOST_DEVICE bool is_host_problem_shape_available() { return true; } private: UnderlyingProblemShape problem_shape_{}; }; } // namespace cutlass::gemm
cutlass/include/cutlass/gemm/group_array_problem_shape.hpp/0
{ "file_path": "cutlass/include/cutlass/gemm/group_array_problem_shape.hpp", "repo_id": "cutlass", "token_count": 1233 }
31
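Both wrappers above expose the same minimal interface (groups(), get_problem_shape(), get_host_problem_shape(), is_host_problem_shape_available()), which is what lets grouped and ptr-array kernels share scheduling code. The sketch below shows one plausible host-side setup for the grouped case; the helper name make_group_shape, the explicit device copy, and the example sizes are assumptions for illustration, not part of this header.

// Hedged sketch: wrapping per-group (M,N,K) extents for a grouped GEMM launch.
#include <cstdint>
#include <vector>
#include <cuda_runtime.h>
#include "cute/tensor.hpp"
#include "cutlass/gemm/group_array_problem_shape.hpp"

using ProblemSizeMNK = cute::Shape<int, int, int>;
using GroupShape     = cutlass::gemm::GroupProblemShape<ProblemSizeMNK>;

// Hypothetical helper: upload per-group extents and wrap both views for the kernel.
GroupShape make_group_shape(std::vector<ProblemSizeMNK> const& host_sizes,
                            ProblemSizeMNK* device_sizes) {
  // Kernels read per-group extents from device memory; keeping the host copy as well
  // enables host-side planning through get_host_problem_shape().
  cudaMemcpy(device_sizes, host_sizes.data(),
             host_sizes.size() * sizeof(ProblemSizeMNK), cudaMemcpyHostToDevice);
  return GroupShape{static_cast<int32_t>(host_sizes.size()), device_sizes, host_sizes.data()};
}

// Example: three GEMMs of different shapes in one grouped launch.
//   std::vector<ProblemSizeMNK> sizes = {cute::make_shape(256, 512, 128),
//                                        cute::make_shape(64, 768, 256),
//                                        cute::make_shape(1024, 128, 64)};
//   GroupShape shape = make_group_shape(sizes, device_buffer);  // shape.groups() == 3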
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are accommodated by exchanging A and B operands and assuming transposed layouts. 
*/ #pragma once #include "cutlass/blas3.h" #include "cutlass/complex.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/kernel/trmm_universal.h" #include "cutlass/gemm/kernel/default_trmm.h" #include "cutlass/gemm/kernel/default_trmm_complex.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Complex elementwise transformation on A operand ComplexTransform TransformA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Complex elementwise transformation on B operand ComplexTransform TransformB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Side Mode for the kernel SideMode kSideMode, /// Fill Mode for the triangular matrix FillMode kFillMode, /// Diag Type for the triangular matrix DiagType kDiagType, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by TRMM typename Operator, /// typename Enable = void > struct DefaultTrmmUniversal; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Real-valued TRMM kernels // template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Side Mode for the kernel SideMode kSideMode, /// Fill Mode for the triangular matrix FillMode kFillMode, /// Diag Type for the triangular matrix DiagType kDiagType, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// 
If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by TRMM typename Operator> struct DefaultTrmmUniversal< ElementA, LayoutA, ComplexTransform::kNone, // transform A kAlignmentA, ElementB, LayoutB, ComplexTransform::kNone, // transform B kAlignmentB, kSideMode, kFillMode, kDiagType, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type > { using DefaultTrmmKernel = typename kernel::DefaultTrmm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, kSideMode, kFillMode, kDiagType, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator >::TrmmKernel; /// Define the kernel in terms of the default kernel using TrmmKernel = kernel::TrmmUniversal< typename DefaultTrmmKernel::Mma, typename DefaultTrmmKernel::Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Complex-valued TRMM kernels // template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Complex elementwise transformation on A operand ComplexTransform TransformA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Complex elementwise transformation on B operand ComplexTransform TransformB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Side Mode for the kernel SideMode kSideMode, /// Fill Mode for the triangular matrix FillMode kFillMode, /// Diag Type for the triangular matrix DiagType kDiagType, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by TRMM typename Operator > struct DefaultTrmmUniversal< ElementA, LayoutA, TransformA, kAlignmentA, ElementB, LayoutB, TransformB, kAlignmentB, kSideMode, kFillMode, kDiagType, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type > { using DefaultTrmmKernel = typename kernel::DefaultTrmmComplex< ElementA, LayoutA, ElementB, LayoutB, kSideMode, kFillMode, kDiagType, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, 
InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial >::TrmmKernel; /// Define the kernel in terms of the default kernel using TrmmKernel = kernel::TrmmUniversal< typename DefaultTrmmKernel::Mma, typename DefaultTrmmKernel::Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/default_trmm_universal.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/default_trmm_universal.h", "repo_id": "cutlass", "token_count": 3372 }
32
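The split between the real-valued and complex-valued DefaultTrmmUniversal specializations above rests entirely on the enable_if guard over the accumulator type. The standalone snippet below illustrates that SFINAE dispatch pattern in simplified form, using std:: traits and string tags in place of the cutlass::platform traits and kernel types; the names here are illustrative only.

// Hedged sketch: enable_if-based partial-specialization dispatch, simplified.
#include <complex>
#include <cstdio>
#include <type_traits>

// Simplified stand-in for cutlass::is_complex.
template <class T> struct is_complex : std::false_type {};
template <class T> struct is_complex<std::complex<T>> : std::true_type {};

// Primary template: declared only; one of the two specializations is always selected.
template <class ElementAccumulator, class Enable = void>
struct SelectTrmmKernel;

// Real-valued path.
template <class ElementAccumulator>
struct SelectTrmmKernel<ElementAccumulator,
    typename std::enable_if<!is_complex<ElementAccumulator>::value>::type> {
  static constexpr char const* name = "real-valued TRMM kernel";
};

// Complex-valued path.
template <class ElementAccumulator>
struct SelectTrmmKernel<ElementAccumulator,
    typename std::enable_if<is_complex<ElementAccumulator>::value>::type> {
  static constexpr char const* name = "complex-valued TRMM kernel";
};

int main() {
  std::printf("%s\n", SelectTrmmKernel<float>::name);                // real-valued TRMM kernel
  std::printf("%s\n", SelectTrmmKernel<std::complex<float>>::name);  // complex-valued TRMM kernel
  return 0;
}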
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/arch/arch.h" #include "cutlass/fast_math.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/params_universal_base.h" #include "cutlass/trace.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > class GemmUniversal< Mma_, Epilogue_, ThreadblockSwizzle_, void, // 3.x kernels use the first template argument to define the ProblemShape // We use this invariant to SFINAE dispatch against either the 2.x API or the 3.x API cute::enable_if_t<not (cute::is_tuple<Mma_>::value || IsCutlass3ArrayKernel<Mma_>::value)> > { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Split-K preserves splits that are 128b aligned static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value); // // Structures // /// Argument structure struct Arguments : UniversalArgumentsBase { // // Data members // typename EpilogueOutputOp::Params epilogue; void const * ptr_A; void const * ptr_B; void const * ptr_C; void * ptr_D; int64_t batch_stride_A; int64_t batch_stride_B; int64_t batch_stride_C; typename LayoutA::Stride stride_a; typename LayoutB::Stride stride_b; typename LayoutC::Stride stride_c; typename LayoutC::Stride stride_d; typename LayoutA::Stride::LongIndex lda; typename LayoutB::Stride::LongIndex ldb; typename LayoutC::Stride::LongIndex ldc; typename LayoutC::Stride::LongIndex ldd; int const * ptr_gather_A_indices; int const * ptr_gather_B_indices; int const * ptr_scatter_D_indices; // // Methods // Arguments(): ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), ptr_gather_A_indices(nullptr), ptr_gather_B_indices(nullptr), ptr_scatter_D_indices(nullptr) {} /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, typename LayoutA::Stride stride_a, typename LayoutB::Stride stride_b, typename LayoutC::Stride stride_c, typename LayoutC::Stride stride_d, int const *ptr_gather_A_indices = nullptr, int const *ptr_gather_B_indices = nullptr, int const *ptr_scatter_D_indices = nullptr) : UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), 
batch_stride_C(batch_stride_C), stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), ptr_scatter_D_indices(ptr_scatter_D_indices) { lda = 0; ldb = 0; ldc = 0; ldd = 0; CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); } /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, typename LayoutA::Stride::LongIndex lda, typename LayoutB::Stride::LongIndex ldb, typename LayoutC::Stride::LongIndex ldc, typename LayoutC::Stride::LongIndex ldd, int const *ptr_gather_A_indices = nullptr, int const *ptr_gather_B_indices = nullptr, int const *ptr_scatter_D_indices = nullptr ): UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), ptr_scatter_D_indices(ptr_scatter_D_indices) { stride_a = make_Coord(lda); stride_b = make_Coord(ldb); stride_c = make_Coord(ldc); stride_d = make_Coord(ldd); CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); } /// Returns arguments for the transposed problem Arguments transposed_problem() const { Arguments args(*this); std::swap(args.problem_size.m(), args.problem_size.n()); std::swap(args.ptr_A, args.ptr_B); std::swap(args.lda, args.ldb); std::swap(args.stride_a, args.stride_b); std::swap(args.batch_stride_A, args.batch_stride_B); std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices); return args; } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params : UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB> { using ParamsBase = UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB>; // // Data members // typename Mma::IteratorA::Params params_A; typename Mma::IteratorB::Params params_B; typename Epilogue::OutputTileIterator::Params params_C; typename Epilogue::OutputTileIterator::Params params_D; typename EpilogueOutputOp::Params output_op; void * ptr_A; void * ptr_B; void * ptr_C; void * ptr_D; int64_t batch_stride_A; int64_t batch_stride_B; int64_t batch_stride_C; int * ptr_gather_A_indices; int * ptr_gather_B_indices; int * ptr_scatter_D_indices; // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : ParamsBase(args, device_sms, sm_occupancy), params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a), params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b), params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c), params_D(args.ldd ? 
make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d), output_op(args.epilogue), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(args.ptr_D), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C(args.batch_stride_C), ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)), ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)), ptr_scatter_D_indices(const_cast<int *>(args.ptr_scatter_D_indices)) {} /// Lightweight update given a subset of arguments. void update(Arguments const &args) { CUTLASS_TRACE_HOST("GemmUniversal::Params::update()"); // Update input/output pointers ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_C = args.batch_stride_C; this->batch_stride_D = args.batch_stride_D; ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices); ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices); ptr_scatter_D_indices = const_cast<int *>(args.ptr_scatter_D_indices); output_op = args.epilogue; } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Host dispatch API // /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { CUTLASS_TRACE_HOST("GemmUniversal::can_implement()"); static int const kAlignmentA = (cute::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value) ? 32 : (cute::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) ? 64 : Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = (cute::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value) ? 32 : (cute::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) ? 64 : Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = (cute::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value) ? 32 : (cute::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) ? 
64 : Epilogue::OutputTileIterator::kElementsPerAccess; bool isAMisaligned = false; bool isBMisaligned = false; bool isCMisaligned = false; if (cute::is_same<LayoutA, layout::RowMajor>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } else if (cute::is_same<LayoutA, layout::ColumnMajor>::value) { isAMisaligned = problem_size.m() % kAlignmentA; } else if (cute::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value || cute::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } if (cute::is_same<LayoutB, layout::RowMajor>::value) { isBMisaligned = problem_size.n() % kAlignmentB; } else if (cute::is_same<LayoutB, layout::ColumnMajor>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } else if (cute::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value || cute::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } if (cute::is_same<LayoutC, layout::RowMajor>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } else if (cute::is_same<LayoutC, layout::ColumnMajor>::value) { isCMisaligned = problem_size.m() % kAlignmentC; } else if (cute::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value || cute::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } if (isAMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); return Status::kErrorMisalignedOperand; } if (isBMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); return Status::kErrorMisalignedOperand; } if (isCMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); return Status::kErrorMisalignedOperand; } CUTLASS_TRACE_HOST(" returning kSuccess"); return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmUniversal op; op(params, shared_storage); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { ThreadblockSwizzle threadblock_swizzle; run_with_swizzle(params, shared_storage, threadblock_swizzle); } /// Executes one GEMM with an externally-provided swizzling function CUTLASS_DEVICE void run_with_swizzle(Params const &params, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) { cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. 
// if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } else if (params.mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A, params.ptr_gather_A_indices); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B, params.ptr_gather_B_indices); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); // // Fetch pointers based on mode. // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; } // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset, params.ptr_scatter_D_indices ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset, params.ptr_scatter_D_indices ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); } // Execute the epilogue operator to update the destination tensor. epilogue( output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/gemm_universal.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/gemm_universal.h", "repo_id": "cutlass", "token_count": 9704 }
33
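The per-layout alignment predicates in GemmUniversal::can_implement test the contiguous extent of each operand against its vectorized access width: for operand A that is K when A is row-major, M when A is column-major, and K again for the interleaved layouts. A minimal host-side sketch of the same predicate for operand A, using a plain enum and an example alignment value in place of the kernel's layout tags and kAlignmentA trait, could look like this:

#include <cstdio>

// Hypothetical stand-ins for the CUTLASS layout tags; only the cases checked
// for operand A in can_implement() are modeled here.
enum class LayoutTag { RowMajor, ColumnMajor, ColumnMajorInterleaved };

// Returns true when operand A's contiguous extent is not a multiple of the
// vectorized access width, mirroring the per-layout branches above.
bool is_A_misaligned(LayoutTag layout, int m, int k, int alignment) {
  switch (layout) {
    case LayoutTag::RowMajor:               return (k % alignment) != 0;  // contiguous along K
    case LayoutTag::ColumnMajor:            return (m % alignment) != 0;  // contiguous along M
    case LayoutTag::ColumnMajorInterleaved: return (k % alignment) != 0;  // interleaved checks K
  }
  return false;
}

int main() {
  // Example: column-major A with 8-element accesses; M = 129 fails the check.
  std::printf("A misaligned: %d\n", is_A_misaligned(LayoutTag::ColumnMajor, 129, 64, 8));
  return 0;
}

Operands B and C follow the same pattern with their own contiguous dimensions (N for row-major B and C, K for column-major B, M for column-major C), which is why the kernel checks each operand separately before returning kErrorMisalignedOperand.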
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/blas3.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T) typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T) typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper) BlasMode BlasMode_ ///! 
Blas3 computation mode > struct Rank2KUniversal { public: using Mma1 = Mma1_; using Mma2 = Mma2_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma1::IteratorA::Element; using ElementB = typename Mma1::IteratorB::Element; // Mma1 (A x B^T) using LayoutA = typename Mma1::IteratorA::Layout; using LayoutBT = typename Mma1::IteratorB::Layout; static ComplexTransform const kMma1TransformA = Mma1::kTransformA; static ComplexTransform const kMma1TransformB = Mma1::kTransformB; // Mma2 (B x A^T) using LayoutB = typename Mma2::IteratorA::Layout; using LayoutAT = typename Mma2::IteratorB::Layout; static ComplexTransform const kMma2TransformA = Mma2::kTransformA; static ComplexTransform const kMma2TransformB = Mma2::kTransformB; // Common type definitions for Mma1 and Mma2 using Operator = typename Mma1::Operator; using OperatorClass = typename Mma1::Operator::OperatorClass; using ThreadblockShape = typename Mma1::Shape; using WarpShape = typename Mma1::Operator::Shape; using InstructionShape = typename Mma1::Policy::Operator::InstructionShape; using ArchTag = typename Mma1::ArchTag; static int const kStages = Mma1::kStages; static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements; // Output related typedefinitions using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; static FillMode const kFillModeC = FillModeC_; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; static BlasMode const kBlasMode = BlasMode_; /// Warp count (concept: GemmShape) using WarpCount = typename Mma1::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; // // Structures // /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; GemmCoord problem_size {}; int batch_count{1}; typename EpilogueOutputOp::Params epilogue{}; void const * ptr_A = nullptr; void const * ptr_B = nullptr; void const * ptr_C = nullptr; void * ptr_D = nullptr; int64_t batch_stride_A {0}; int64_t batch_stride_B {0}; int64_t batch_stride_C {0}; int64_t batch_stride_D {0}; typename LayoutA::Stride::Index lda{0}; typename LayoutB::Stride::Index ldb{0}; typename LayoutC::Stride::Index ldc{0}; typename LayoutC::Stride::Index ldd{0}; bool allow_early_exit{false}; // // Methods // Arguments() = default; /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, bool allow_early_exit = false ): mode(mode), problem_size(problem_size), batch_count(batch_count), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), batch_stride_A(batch_stride_A), batch_stride_B(0), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), allow_early_exit(allow_early_exit) { } /// Returns arguments for a the transposed problem Arguments transposed_problem() const { Arguments args(*this); std::swap(args.ptr_A, args.ptr_B); 
std::swap(args.lda, args.ldb); std::swap(args.batch_stride_A, args.batch_stride_B); return args; } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int swizzle_log_tile{0}; // Mma1 Iterator A and B params typename Mma1::IteratorA::Params params_A{}; typename Mma1::IteratorB::Params params_BT{}; // Mma2 Iterator A and B params typename Mma2::IteratorA::Params params_B{}; typename Mma2::IteratorB::Params params_AT{}; typename Epilogue::OutputTileIterator::Params params_C{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename EpilogueOutputOp::Params output_op{}; GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; int batch_count{0}; int gemm_k_size{0}; void * ptr_A = nullptr; void * ptr_B = nullptr; void * ptr_C = nullptr; void * ptr_D = nullptr; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; int64_t batch_stride_C{0}; int64_t batch_stride_D{0}; int *semaphore = nullptr; bool allow_early_exit {false}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params( Arguments const &args, cutlass::gemm::GemmCoord const & grid_tiled_shape, int gemm_k_size, void *workspace = nullptr ): problem_size(args.problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(args.lda), params_BT(args.ldb), params_B(args.ldb), params_AT(args.lda), params_C(args.ldc), params_D(args.ldd), output_op(args.epilogue), mode(args.mode), batch_count(args.batch_count), gemm_k_size(gemm_k_size), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(const_cast<void *>(args.ptr_D)), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C(args.batch_stride_C), batch_stride_D(args.batch_stride_D), semaphore(static_cast<int *>(workspace)), allow_early_exit(args.allow_early_exit) { } CUTLASS_HOST_DEVICE void update( Arguments const &args, void *workspace = nullptr) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; output_op = args.epilogue; semaphore = static_cast<int *>(workspace); } }; /// Shared memory storage structure union SharedStorage { typename Mma1::SharedStorage mma1_main_loop; typename Mma2::SharedStorage mma2_main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Methods // CUTLASS_DEVICE Rank2KUniversal() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Early exit following LAPACK's definition if (params.allow_early_exit && (params.output_op.alpha == 
ElementC(0)) && (params.output_op.beta == ElementC(1))) { return; } // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } // Early exit if Fill Mode is Lower and // if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal) if (kFillModeC == cutlass::FillMode::kLower && (threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) { return; } // Early exit if Fill Mode is Upper and // if the entire tile is below the main diagonal (top-right corner is at or below the diagonal) if (kFillModeC == cutlass::FillMode::kUpper && threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { return; } bool tile_on_diagonal = false; // Mark tiles that are being crossed by the main diagonal // (top-right and bottom-left corners are on either side of the diagonal) if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN && threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { tile_on_diagonal = true; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. // if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_MxK{ threadblock_tile_offset.m() * Mma1::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_KxN{ offset_k, threadblock_tile_offset.n() * Mma1::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands for Mma1 typename Mma1::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_MxK); typename Mma1::IteratorB iterator_BT( params.params_BT, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_KxN); // Construct iterators to A and B operands for Mma2 typename Mma2::IteratorA iterator_B( params.params_B, ptr_B, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_MxK); typename Mma2::IteratorB iterator_AT( params.params_AT, ptr_A, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_KxN); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. 
int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply for Mma1 (A x BT) Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx); // Construct thread-scoped matrix multiply for Mma2 (B x AT) Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx); typename Mma1::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK; // Compute threadblock-scoped matrix multiply-add (A x BT) mma1( gemm_k_iterations, accumulators, iterator_A, iterator_BT, accumulators); // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. if (kBlasMode == BlasMode::kHermitian) { // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma1::Shape::kM, threadblock_tile_offset.n() * Mma1::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); // // Fetch pointers based on mode. // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; } // If CTA not on diagonal, FillMode doesn't apply. FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone; // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset, kFillModeCTA ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset, kFillModeCTA ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. 
epilogue( output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } __syncthreads(); accumulators.clear(); } // Compute threadblock-scoped matrix multiply-add (B x AT) mma2( gemm_k_iterations, accumulators, iterator_B, iterator_AT, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); /* Needed for HER2K where the second HERK is multiplied by conj(alpha) */ typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1); EpilogueOutputOp output_op_her2k(second_her2k_params); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma1::Shape::kM, threadblock_tile_offset.n() * Mma1::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. if (kBlasMode == BlasMode::kHermitian) { ptr_C = static_cast<ElementC *>(params.ptr_D); } ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); // // Fetch pointers based on mode. // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating if (kBlasMode == BlasMode::kSymmetric) { output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } else { output_op_her2k.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; } // If CTA not on diagonal, FillMode doesn't apply. FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone; // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset, kFillModeCTA ); // Tile iterator writing to destination tensor. 
typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset, kFillModeCTA ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. if (kBlasMode == BlasMode::kSymmetric) { epilogue( output_op, iterator_D, accumulators, iterator_C); } else { epilogue( output_op_her2k, iterator_D, accumulators, iterator_C); } // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/rank_2k_universal.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/rank_2k_universal.h", "repo_id": "cutlass", "token_count": 9747 }
34
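Rank2KUniversal's fill-mode logic above reduces to comparing a tile's corner coordinates with the main diagonal: a kLower kernel drops tiles whose bottom-left corner sits at or above the diagonal, a kUpper kernel drops tiles whose top-right corner sits at or below it, and any tile with corners on both sides of the diagonal is flagged so the epilogue applies FillMode masking only there. A self-contained sketch of that classification, with hypothetical tile-index and tile-shape arguments standing in for threadblock_tile_offset and Mma1::Shape, is shown below:

#include <cstdio>

enum class FillMode  { kLower, kUpper };
enum class TileClass { kSkip, kFull, kOnDiagonal };

// Classify one threadblock tile of a triangular (rank-2k style) update.
// tile_m/tile_n are tile indices; tile_rows/tile_cols are tile extents in elements.
TileClass classify_tile(FillMode fill, int tile_m, int tile_n, int tile_rows, int tile_cols) {
  int top    = tile_m * tile_rows;          // first row covered by the tile
  int bottom = (tile_m + 1) * tile_rows;    // one past the last row
  int left   = tile_n * tile_cols;          // first column
  int right  = (tile_n + 1) * tile_cols;    // one past the last column

  // Entirely above the diagonal: bottom-left corner at or above it.
  if (fill == FillMode::kLower && bottom <= left) return TileClass::kSkip;
  // Entirely below the diagonal: top-right corner at or below it.
  if (fill == FillMode::kUpper && top >= right)   return TileClass::kSkip;
  // Corners on either side of the diagonal: the tile is crossed by it.
  if (bottom > left && top < right)               return TileClass::kOnDiagonal;
  return TileClass::kFull;
}

int main() {
  // With 128x128 tiles, tile (0,1) lies strictly above the diagonal and is skipped for kLower.
  std::printf("%d\n", static_cast<int>(classify_tile(FillMode::kLower, 0, 1, 128, 128)));
  return 0;
}

Tiles classified as on-diagonal correspond to tile_on_diagonal == true in the kernel, where the output iterators are constructed with kFillModeCTA = kFillModeC instead of FillMode::kNone.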
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Sparse GEMM with visitor. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/kernel/sparse_gemm.h" #include "cutlass/gemm/kernel/params_sparse_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// // Sparse Gemm that compute the epilogue visitor functor template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > struct SparseGemmWithEpilogueVisitor : public SparseGemm<Mma_, Epilogue_, ThreadblockSwizzle_, false> { using Base = SparseGemm<Mma_, Epilogue_, ThreadblockSwizzle_, false>; using Mma = Mma_; using Epilogue = Epilogue_; using ThreadblockSwizzle = ThreadblockSwizzle_; using FusionCallbacks = typename Epilogue::FusionCallbacks; using ParamsA = typename Mma::IteratorA::Params; using TensorRefA = typename Mma::IteratorA::TensorRef; using ParamsB = typename Mma::IteratorB::Params; using TensorRefB = typename Mma::IteratorB::TensorRef; using ParamsE = typename Mma::IteratorE::Params; using TensorRefE = typename Mma::IteratorE::TensorRef; static int const kSparse = Base::kSparse; static int const kElementsPerElementE = Base::kElementsPerElementE; using SharedStorage = typename Base::SharedStorage; /// Parameters structure struct Params : public SparseParamsBase< ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB, ParamsE, TensorRefE> { using Base = SparseParamsBase< ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB, ParamsE, TensorRefE>; // // Data members // typename FusionCallbacks::Params output_op; cute::Shape<int32_t,int32_t,int32_t> problem_shape; // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmCoord const & grid_tiled_shape, typename Mma::IteratorA::TensorRef ref_A, typename Mma::IteratorB::TensorRef ref_B, typename Mma::IteratorE::TensorRef ref_E, typename FusionCallbacks::Arguments output_op = typename FusionCallbacks::Arguments() ): Base(problem_size, grid_tiled_shape, ref_A, ref_B, ref_E, Mma::Shape::kK), output_op(FusionCallbacks::to_underlying_arguments(problem_size, output_op, nullptr /*workspace*/)), problem_shape(problem_size.m(), problem_size.n(), 1) { } }; // // Methods // CUTLASS_HOST_DEVICE SparseGemmWithEpilogueVisitor() { } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.k() * params.gemm_k_size / kSparse, }; cutlass::MatrixCoord tb_offset_B{ threadblock_tile_offset.k() * params.gemm_k_size, threadblock_tile_offset.n() * Mma::Shape::kN }; cutlass::MatrixCoord tb_offset_E{ threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.k() * params.gemm_k_size / kSparse, }; // Problem size is a function of threadblock index in the K dimension int problem_size_k = min( params.problem_size.k(), (threadblock_tile_offset.k() + 1) * params.gemm_k_size); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A, B, and E operands typename Mma::IteratorA iterator_A( params.params_A, params.ref_A.data(), {params.problem_size.m(), problem_size_k / kSparse}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, params.ref_B.data(), {problem_size_k, 
params.problem_size.n()}, thread_idx, tb_offset_B); typename Mma::IteratorE iterator_E( params.params_E, params.ref_E.data(), {params.problem_size.m(), problem_size_k / kSparse / kElementsPerElementE}, thread_idx, tb_offset_E); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); if (gemm_k_iterations > 0) { // Compute threadblock-scoped matrix multiply-add mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators); } // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); // // Epilogue // Epilogue epilogue( params.output_op, shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Execute the epilogue operator to update the destination tensor. epilogue(accumulators, threadblock_tile_offset, params.problem_shape, thread_idx); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/kernel/sparse_gemm_with_visitor.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/sparse_gemm_with_visitor.h", "repo_id": "cutlass", "token_count": 2891 }
35
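The sparse kernel above addresses operand A and its metadata E in compressed coordinates: A's K extent is divided by kSparse (2 for 2:4 structured sparsity), and E's extent is divided again by kElementsPerElementE, the number of metadata entries packed into one storage element. A small sketch of that extent arithmetic, using assumed example values for the two factors rather than the traits exported by Mma, is shown below:

#include <cstdio>

// Assumed example values: 2:4 structured sparsity and 8 metadata entries per storage element.
constexpr int kSparse = 2;
constexpr int kElementsPerElementE = 8;

int main() {
  int m = 512, n = 256, k = 1024;

  // A stores only the kept elements of each group, so its K extent shrinks by kSparse.
  int a_cols = k / kSparse;
  // E stores one index per kept element, packed kElementsPerElementE to a storage element.
  int e_cols = k / kSparse / kElementsPerElementE;

  std::printf("A: %d x %d, B: %d x %d, E: %d x %d\n", m, a_cols, k, n, m, e_cols);
  return 0;
}

This is the same division that appears in the kernel's tb_offset_A and tb_offset_E coordinates and in the extents passed to iterator_A and iterator_E.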
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting TensorOp instructions. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/gemm/threadblock/default_mma_core.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)), Shape::kM); using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementA>::value, Crosswise_A>; // Shared memory layout static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)), Shape::kN); using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementB>::value, Crosswise_B>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by MMA typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, 
ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, Shape::kK>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, Shape::kK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix 
multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by MMA typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, Shape::kK>; // Shared memory layout static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)), Shape::kN); using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementB>::value, Crosswise_B>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level 
tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by MMA typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassTensorOp; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)), Shape::kM); using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementA>::value, Crosswise_A>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, Shape::kK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = 
transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Below is for arch::OpMultiplyAddFastF16 //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float, layout::ColumnMajor, float, layout::RowMajor, float, LayoutC_, arch::OpClassTensorOp, 2, arch::OpMultiplyAddFastF16> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = float; using LayoutA = layout::ColumnMajor; using ElementB = float; using LayoutB = layout::RowMajor; using ElementC = float; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 256; /// Default Operator using Operator = arch::OpMultiplyAdd; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>; // Shared memory layout using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementA>::value >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, half_t, SmemLayoutA, 1, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, half_t, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float, layout::RowMajor, float, layout::ColumnMajor, float, LayoutC_, arch::OpClassTensorOp, 2, arch::OpMultiplyAddFastF16> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = float; using LayoutA = layout::RowMajor; using ElementB = float; using LayoutB = layout::ColumnMajor; using ElementC = float; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 256; /// Default Operator using Operator = arch::OpMultiplyAdd; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<sizeof_bits<half_t>::value, Shape::kK>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<half_t>::value, Shape::kK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, half_t, SmemLayoutA, 0, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, half_t, SmemLayoutB, 1, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float, layout::RowMajor, float, layout::RowMajor, float, LayoutC_, arch::OpClassTensorOp, 2, arch::OpMultiplyAddFastF16> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = float; using LayoutA = layout::RowMajor; using ElementB = float; using LayoutB = layout::RowMajor; using ElementC = float; using LayoutC = LayoutC_; using OperatorClass = 
arch::OpClassTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 256; /// Default Operator using Operator = arch::OpMultiplyAdd; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<half_t>::value, Shape::kK>; // Shared memory layout using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, half_t, SmemLayoutA, 0, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, half_t, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float, layout::ColumnMajor, float, layout::ColumnMajor, float, LayoutC_, arch::OpClassTensorOp, 2, arch::OpMultiplyAddFastF16> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = float; using LayoutA = layout::ColumnMajor; using ElementB = float; using LayoutB = 
layout::ColumnMajor; using ElementC = float; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassTensorOp; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 256; /// Default Operator using Operator = arch::OpMultiplyAdd; // Warp thread arrangement static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<half_t>::value, Shape::kK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<8, 4>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, half_t, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, half_t, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major-interleave /// B: row-major-interleave /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes /// /// Column/RowMajorInterleved<InterleavedK>(m, n) is mapped to Column/RowMajor(m /// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators /// can be reused. The shared store iterator is the same as the crosswise shared /// store iterator. So, the only thing we need to do is to swap the coordinates /// (contiguous <=> strided) used by the global iterator and the shared store /// iterator. 
template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by MMA typename Operator_, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor, /// Number of interleaved k int InterleavedK> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajorInterleaved<InterleavedK>, ElementB_, layout::RowMajorInterleaved<InterleavedK>, ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_, AccumulatorsInRowMajor> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>; using ElementB = ElementB_; using LayoutB = layout::RowMajorInterleaved<InterleavedK>; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassTensorOp; static int const kInterleavedK = InterleavedK; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = kAccessSizeInBits / sizeof_bits<ElementA>::value; static int const kWarpThreadArrangementContiguous = kInterleavedK / kElementsPerAccess; static int const kWarpThreadArrangementStrided = kWarpSize / kWarpThreadArrangementContiguous; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, kInterleavedK>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, kInterleavedK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM * kInterleavedK, Shape::kK / kInterleavedK>, kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMap< IteratorThreadMapA, layout::PitchLinearShape<kWarpThreadArrangementContiguous, kWarpThreadArrangementStrided>>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN * kInterleavedK, Shape::kK / kInterleavedK>, kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; /// 
Transpose the ThreadMap of iterator B using SmemThreadMapB = transform::TransposePitchLinearThreadMap< IteratorThreadMapB, layout::PitchLinearShape<kWarpThreadArrangementContiguous, kWarpThreadArrangementStrided>>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm75.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm75.h", "repo_id": "cutlass", "token_count": 15728 }
36
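The FastF16 specializations above are normally consumed indirectly through higher-level GEMM builders, but they can also be instantiated directly to inspect the traits they export. Below is a minimal compile-only sketch; the threadblock, warp, and instruction shapes are illustrative assumptions, not values taken from the file itself.

// Compile-only sketch: instantiate the row-major x column-major FastF16 specialization
// defined above and inspect the traits it exports. Shapes below are assumptions.
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"

using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;  // assumed threadblock tile
using WarpShape        = cutlass::gemm::GemmShape<64, 64, 32>;    // assumed warp tile
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;      // assumed SM75 tensor op shape

using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape,
    float, cutlass::layout::RowMajor,       // A (converted to half_t in shared memory)
    float, cutlass::layout::ColumnMajor,    // B (converted to half_t in shared memory)
    float, cutlass::layout::RowMajor,       // C
    cutlass::arch::OpClassTensorOp,
    2,                                      // two-stage (double-buffered) pipeline
    cutlass::arch::OpMultiplyAddFastF16>;

// Traits exported by the specialization.
using SmemLayoutA = MmaCore::SmemLayoutA;   // crosswise layout for the row-major A operand
using MmaTensorOp = MmaCore::MmaTensorOp;   // warp-level tensor op
static_assert(MmaCore::kThreads == 128, "4 warps x 32 threads for the assumed shapes");

int main() { return 0; }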
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/gemm/threadblock/mma_sparse_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Iterates over tiles of E operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorE_, /// Iterates over tiles of E operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorE_, /// Cache operation for operand E cutlass::arch::CacheOperation::Kind CacheOpE, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class SparseMmaMultistage : public SparseMmaBase<Shape_, Policy_, Stages> { public: ///< Base class using Base = SparseMmaBase<Shape_, Policy_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Iterates over tiles of E operand in global memory using IteratorE = IteratorE_; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; ///< Policy describing tuning details using Policy = Policy_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; using SmemIteratorE = SmemIteratorE_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; static cutlass::arch::CacheOperation::Kind const kCacheOpE = CacheOpE; static int const kSparse = Policy::Operator::kSparse; static int const kMetaSizeInBits = Policy::Operator::kMetaSizeInBits; static int const kMaxID2 = Policy::Operator::kMaxID2; static int const kElementsPerElementE = Policy::Operator::kElementsPerElementE; // // Dependent types // /// Fragment of accumulator tile using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// ElementE using ElementE = typename IteratorE::Element; /// LayoutE using LayoutE = typename IteratorE::Layout; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on A operand static ComplexTransform const kTransformA = Operator::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = Operator::kTransformB; /// Internal structure exposed for introspection. 
struct Detail { /// Number of async copies to load one stage of operand A static int const TBLoadIterationsA = IteratorA::ThreadMap::Iterations::kCount; /// Number of async copies to load one stage of operand B static int const TBLoadIterationsB = IteratorB::ThreadMap::Iterations::kCount; /// Number of async copies to load one stage of operand E static int const TBLoadIterationsE = IteratorE::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of async copies to load one group of operand A static int const kAccessesPerGroupA = (TBLoadIterationsA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of async copies to load one group of operand B static int const kAccessesPerGroupB = (TBLoadIterationsB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of async copies to load one group of operand E static int const kAccessesPerGroupE = (TBLoadIterationsE + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// E operand is tiny. For the most of time, not all the warps are needed /// to load it from the global memory. static int const kValidWarps = IteratorE::ThreadMap::kThreads / 32; /// B operand is twice as big as A which brings very high register pressure. /// We have to sacrifice the double buffer when the warp tile size is big. static int const kBBufferSize = ((sizeof(typename Operator::ElementC) == 4) && ((platform::is_same<typename Operator::Policy::Operator::ElementA, typename Operator::ElementA>::value && platform::is_same<typename Operator::Policy::Operator::ElementB, typename Operator::ElementB>::value)) && (Operator::Shape::kM >= 64 && Operator::Shape::kN >= 64)) ? 1 : 2; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; using WarpFragmentE = typename Operator::FragmentE; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; /// Iterator to write threadblock-scoped tile of E operand to shared memory SmemIteratorE smem_iterator_E_; /// Warp id bool is_warp_valid_; public: /// Construct from tensor references CUTLASS_DEVICE SparseMmaMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx), smem_iterator_E_(shared_storage.operand_E_ref(), thread_idx) { is_warp_valid_ = warp_idx < Detail::kValidWarps; // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int 
warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); this->warp_tile_iterator_E_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); } CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, IteratorE &iterator_E, int group_start_A = 0, int group_start_B = 0, int group_start_E = 0) { iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); this->smem_iterator_A_.set_iteration_index(group_start_A); // async copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::TBLoadIterationsA) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_A.get(); cutlass::arch::cp_async<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); ++iterator_A; } ++this->smem_iterator_A_; } } iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); this->smem_iterator_B_.set_iteration_index(group_start_B); // async copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::TBLoadIterationsB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B.get(); cutlass::arch::cp_async<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B.valid()); ++iterator_B; } ++this->smem_iterator_B_; } } iterator_E.set_iteration_index(group_start_E); this->smem_iterator_E_.set_iteration_index(group_start_E); // async copy for operand E CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupE; ++j) { if (group_start_E + j < Detail::TBLoadIterationsE) { typename IteratorE::AccessType *dst_ptr = reinterpret_cast<typename IteratorE::AccessType *>( this->smem_iterator_E_.get()); int const kSrcBytes = sizeof_bits<typename IteratorE::Element>::value * IteratorE::ThreadMap::kElementsPerAccess / 8; auto gmem_ptr = iterator_E.get(); cutlass::arch::cp_async<kSrcBytes, kCacheOpE>( dst_ptr, gmem_ptr, iterator_E.valid() && is_warp_valid_); ++iterator_E; ++this->smem_iterator_E_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< iterator over E operand in global memory IteratorE iterator_E, ///< initial value of accumulator FragmentC const &src_accum) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { 
iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); iterator_E.clear_mask(gemm_k_iterations == 0); iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // async copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::TBLoadIterationsA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); ++iterator_A; } ++this->smem_iterator_A_; } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // async copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::TBLoadIterationsB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B.get(), iterator_B.valid()); ++iterator_B; } ++this->smem_iterator_B_; } iterator_E.set_iteration_index(0); this->smem_iterator_E_.set_iteration_index(0); // async copy for operand E CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::TBLoadIterationsE; ++j) { typename IteratorE::AccessType *dst_ptr = reinterpret_cast<typename IteratorE::AccessType *>( this->smem_iterator_E_.get()); int const kSrcBytes = sizeof_bits<typename IteratorE::Element>::value * IteratorE::ThreadMap::kElementsPerAccess / 8; if (is_warp_valid_) cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpE>( dst_ptr, iterator_E.get(), iterator_E.valid()); ++iterator_E; ++this->smem_iterator_E_; } // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); iterator_E.add_tile_offset({0, 1}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); this->smem_iterator_E_.add_tile_offset({0, 1}); // cp.async.commit_group - completes a stage cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[Detail::kBBufferSize]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[Detail::kBBufferSize]; WarpFragmentE warp_frag_E[2]; Operator warp_mma; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_E_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); this->warp_tile_iterator_E_.load(warp_frag_E[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; ++this->warp_tile_iterator_E_; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); iterator_E.clear_mask(gemm_k_iterations == 0); int smem_write_stage_idx = Base::kStages 
- 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_E_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_E_.load(warp_frag_E[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_E_; if (Detail::kBBufferSize == 2) { this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.load( warp_loaded_frag_B[(warp_mma_k + 1) % Detail::kBBufferSize]); ++this->warp_tile_iterator_B_; } if (warp_mma_k > 0) warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % Detail::kBBufferSize], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % Detail::kBBufferSize]); warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % Detail::kBBufferSize], accum, warp_frag_E[warp_mma_k % 2] ); if (Detail::kBBufferSize == 1) { this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_B_; } // Issue global->shared copies for the this stage if (warp_mma_k < Base::kWarpGemmIterations - 1) { int group_start_iteration_A, group_start_iteration_B, group_start_iteration_E; group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; group_start_iteration_E = warp_mma_k * Detail::kAccessesPerGroupE; copy_tiles_and_advance( iterator_A, iterator_B, iterator_E, group_start_iteration_A, group_start_iteration_B, group_start_iteration_E); } if (warp_mma_k + 2 == Base::kWarpGemmIterations) { int group_start_iteration_A, group_start_iteration_B, group_start_iteration_E; group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; group_start_iteration_E = (warp_mma_k + 1) * Detail::kAccessesPerGroupE; copy_tiles_and_advance( iterator_A, iterator_B, iterator_E, group_start_iteration_A, group_start_iteration_B, group_start_iteration_E); // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. 
arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); iterator_E.add_tile_offset({0, 1}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); this->smem_iterator_E_.add_tile_offset({0, 1}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); this->smem_iterator_E_.add_tile_offset({0, -Base::kStages}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); this->warp_tile_iterator_E_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); iterator_E.clear_mask(gemm_k_iterations == 0); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations) warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % Detail::kBBufferSize], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % Detail::kBBufferSize]); } } // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/threadblock/mma_sparse_multistage.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/mma_sparse_multistage.h", "repo_id": "cutlass", "token_count": 10700 }
37
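As a rough mental model of the mainloop above: the prologue fills kStages-1 shared-memory stages with cp.async, cp_async_wait<kStages-2> guarantees at least one stage is resident before math begins, and the copies for the next stage are interleaved with the warp-level MMAs of the current k-tile in small groups. The plain C++ sketch below (assumed counts, not CUTLASS code) prints that schedule.

// Standalone sketch of the multistage bookkeeping: stage prologue depth and the
// ceil-divided copy groups mirrored by Detail::kAccessesPerGroupA/B/E.
#include <cstdio>

int main() {
  const int kStages = 4;              // assumed Base::kStages
  const int kWarpGemmIterations = 4;  // assumed warp-level MMAs per threadblock k-tile
  const int kTBLoadIterations = 6;    // assumed async copies to fill one stage of an operand

  // Spread the copies of the next stage across the warp MMAs so loads and math overlap.
  const int kAccessesPerGroup =
      (kTBLoadIterations + kWarpGemmIterations - 1) / kWarpGemmIterations;

  printf("prologue commits %d stages; wait<%d> leaves >=1 stage resident\n",
         kStages - 1, kStages - 2);
  for (int warp_mma_k = 0; warp_mma_k < kWarpGemmIterations; ++warp_mma_k) {
    int first = warp_mma_k * kAccessesPerGroup;
    int last  = first + kAccessesPerGroup;
    if (first < kTBLoadIterations)
      printf("warp_mma_k=%d issues copy group [%d, %d)\n", warp_mma_k, first,
             last < kTBLoadIterations ? last : kTBLoadIterations);
  }
  return 0;
}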
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" #include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Layout of operand in memory typename Layout_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_> class MmaTensorOpGaussianComplexAccumulatorTileIterator; //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// /// Partial specialization for complex<T> /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of underlying field of reals. 
typename RealElement, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_> class MmaTensorOpGaussianComplexAccumulatorTileIterator< Shape_, complex<RealElement>, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = complex<RealElement>; /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kRow % InstructionShape::kM) && !(Shape::kColumn % InstructionShape::kN), "Shape of warp-level Mma must be divisible by operator shape."); static_assert(platform::is_same<TensorCoord, MatrixCoord>::value, "Layouts must be defined for logical MatrixCoord coordinate space."); /// Number of mma operations performed using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM, Shape::kColumn / InstructionShape::kN>; }; private: // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements // of that row. The accumulators within one row are assumed to be consecutive. static int const kElementsPerAccess = InstructionShape::kN / 4; static int const kRowsPerTile = 8; static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile. 
It is assumed that the accumulators /// are stored in a gaussian complex arrangement with parts 1, 2, and 3 as entirely contiguous /// arranged as [part1, part2, part3] using Fragment = Array<RealElement, (Shape::kCount / kThreads) * 3>; static int const kPart1Index = (Shape::kCount / kThreads) * 0; static int const kPart2Index = (Shape::kCount / kThreads) * 1; static int const kPart3Index = (Shape::kCount / kThreads) * 2; private: /// Reference to output tensor TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpGaussianComplexAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpGaussianComplexAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpGaussianComplexAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpGaussianComplexAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpGaussianComplexAccumulatorTileIterator & operator++() { // deliberate no-op return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpGaussianComplexAccumulatorTileIterator & operator--() { // deliberate no-op return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpGaussianComplexAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpGaussianComplexAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to load from the tensor Index pointer_offset) const { ///< loads a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; Element z = offset_ref.at({accum_m, accum_n}); frag[mma_accum_start + row * kElementsPerAccess + col + kPart1Index] = z.real() + z.imag(); frag[mma_accum_start + row * kElementsPerAccess + col + kPart2Index] = -z.real(); frag[mma_accum_start + row * kElementsPerAccess + col + kPart3Index] = z.imag(); } } } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( Fragment &frag, ///< fragment to load from the tensor Index byte_offset) const { ///< loads a tile with a linear offset load_with_pointer_offset(frag, byte_offset / sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles load(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } /// Stores a fragment to memory CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, ///< fragment to store from the tensor Index pointer_offset) const { ///< store a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; int idx = mma_accum_start + row * kElementsPerAccess + col; Element z(frag[kPart1Index + idx] - frag[kPart3Index + idx], frag[kPart1Index + idx] + frag[kPart2Index + idx]); offset_ref.at({accum_m, accum_n}) = z; } } } } } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_byte_offset( Fragment const &frag, ///< fragment to store from the tensor Index byte_offset) const { ///< store a tile with a linear offset store_with_pointer_offset(frag, byte_offset / sizeof(Element)); } /// Stores a fragment to memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( Fragment &frag, ///< fragment to store to the tensor TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles store(frag, tile_offset, 0); } /// Stores a fragment to memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( /// fragment to store to the tensor Fragment const &frag, /// stores a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// stores a tile with a logical offset AND a pointer offset Index pointer_offset) const { store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h", "repo_id": "cutlass", "token_count": 5020 }
38
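The fragment layout above keeps three real-valued parts per complex accumulator because the Gaussian (three-multiply) complex MMA accumulates three partial products. The standalone snippet below, not CUTLASS code, checks the load/store round trip of that encoding and recalls the Gauss identity it is built around.

// Standalone check of the 3-part encoding used by the accumulator iterator above:
// load() splits z into parts, store() reconstructs it.
#include <cassert>
#include <complex>
#include <cstdio>

int main() {
  std::complex<float> z(3.0f, -2.0f);

  // What load_with_pointer_offset() writes into the fragment for one accumulator element.
  float part1 = z.real() + z.imag();
  float part2 = -z.real();
  float part3 = z.imag();

  // What store_with_pointer_offset() writes back to memory from the three parts.
  std::complex<float> reconstructed(part1 - part3, part1 + part2);
  assert(reconstructed == z);
  printf("round trip ok: (%f, %f)\n", reconstructed.real(), reconstructed.imag());

  // For reference, the Gauss three-multiply identity behind the arrangement:
  //   (a+bi)(c+di): k1 = c*(a+b), k2 = a*(d-c), k3 = b*(c+d),
  //   real = k1 - k3, imag = k1 + k2  -- the same combination used in the reconstruction.
  return 0;
}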
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators to load sparse meta data used by warp-level matrix multiply operations targeting Sparse Tensor Cores. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { //////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Layout of operand typename Layout_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) int OpDelta_, /// Number of threads participating in one matrix operation int Threads, /// Number of partitions along K dimension int PartitionsK_ = 1> class SparseMmaTensorOpMetaTileIterator { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; static int const kSparse = 2; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kColumn % InstructionShape::kColumn), "Shape of warp-level Mma must be divisible by operator shape."); static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value; // Determine number of elements along outer dimension per individual LDSM op static int const kLdsmOpOuter = InstructionShape::kColumn; static int const kLdsmOpInner = 8 * kElementsPerAccess / kLdsmOpOuter; static_assert(!(Shape::kColumn % kLdsmOpOuter), "Shape of warp-level mma must be divisible by LDSM's " "fundamental tile size."); static_assert(!(Shape::kRow % kLdsmOpInner), "Shape of warp-level mma must be divisible by LDSM's " "fundamental tile size."); /// Shape of one individual LDSM instruction static int const LdsmShapeColumn = InstructionShape::kColumn / kLdsmOpOuter; static int const LdsmShapeRow = ((4 / LdsmShapeColumn * kLdsmOpInner) > Shape::kRow) ? 
(Shape::kRow / kLdsmOpInner) : (4 / LdsmShapeColumn); using LdsmShape = layout::PitchLinearShape<LdsmShapeRow, LdsmShapeColumn>; /// Number and arrangement of LDSM instructions using LdsmIterations = layout::PitchLinearShape< Shape::kRow / kLdsmOpInner / LdsmShapeRow, 1>; /// Number of groups for each tile static int const kGroupsPerTile = Shape::kColumn / InstructionShape::kColumn; }; private: /// Not working on this feature at the moment. static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Pointer type used for accesses using AccessType = Array<Element, Policy::kElementsPerAccess>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kRow * InstructionShape::kColumn / kThreads>; private: /// Layout object storing stride values Index stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; /// Internal counter used to determine when to increment byte offset and when /// to XOR it int k_group_idx_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE SparseMmaTensorOpMetaTileIterator() : pointer_(nullptr), stride_(0), byte_offset_(0), k_group_idx_(0) {} /// Constructor from TensorRef CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator(TensorRef const &ref, int lane_id) : pointer_(reinterpret_cast<AccessType const *>(ref.data())), stride_(ref.stride(0) / Policy::kElementsPerAccess), byte_offset_(0), k_group_idx_(0) { int access_contiguous = (lane_id % (Shape::kRow / Policy::kElementsPerAccess)); int access_strided = (lane_id / (Shape::kRow / Policy::kElementsPerAccess)); byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof_bits<Element>::value * Policy::kElementsPerAccess / 8; } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof_bits<Element>::value / 8; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator &add_tile_offset( TensorCoord const &tile_offset) { int offset = tile_offset.row() * Shape::kRow + tile_offset.column() * InstructionShape::kColumn * stride_ * Policy::kElementsPerAccess; add_pointer_offset(offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator &operator++() { add_tile_offset({0, 1}); if (kPartitionsK > 1) { ++k_group_idx_; // Jump to next stage if (k_group_idx_ == Policy::kGroupsPerTile) { k_group_idx_ = 0; add_tile_offset( {0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)}); } } return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE SparseMmaTensorOpMetaTileIterator &operator--(){ byte_offset_ -= stride_ * InstructionShape::kColumn * sizeof_bits<Element>::value * Policy::kElementsPerAccess / 8; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory 
at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr = reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsmIterations::kContiguous; AccessType const *source_ptr = pointer_ + Policy::LdsmShape::kContiguous * Policy::kLdsmOpInner * c + Policy::LdsmShape::kStrided * s * stride_; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; cutlass::arch::ldsm<layout::RowMajor, Policy::LdsmShape::kCount>( fetch_ptr[access_idx], source_byte_ptr); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kRow / Layout::kElementsPerAccess + tile_offset.strided() * InstructionShape::kColumn * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no op } }; } // namespace warp } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h", "repo_id": "cutlass", "token_count": 4473 }
39
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines layout functions used by GEMM+permute path for common tensor or matrix formats. Like Layout functions, permute layout functions map logical coordinates to linear memory. They often require additional data to describe strides between elements. Permute layout functions must implement all members in the interface of NoPermute<> defined in this file. Address offset computation lies in operator() with private member variables {col_permute_, row_permute_ and stride_} as new addresses after permute op. */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include "assert.h" #endif #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/matrix.h" #include "cutlass/coord.h" #include "cutlass/tensor_coord.h" namespace cutlass { namespace layout { // template<PermuteTag, typename Layout, bool Inverse> // struct PermuteSelect { // // Try to give a reasonable error message to the user // static_assert(!platform::is_same<Permute, Permute>::value, // aka always_false<T> // "You've tried to use a layout permutation for which the implementation is not availble. 
" // "In order to provide an implementation for a particular combination of matrix layout " // "and direction (direct/inverse), please specialize PermuteSelect trait."); // }; // Base template for defining specializations of permutation inverses template<typename Permute> struct InversePermute { // Try to give a reasonable error message to the user static_assert(!platform::is_same<Permute, Permute>::value, // aka always_false<T> "To apply permutation to a GEMM input operand (A or B), an inverse permutation for the desired " "permute class must be defined and enabled by specializing cutlass::layout::InversePermute trait."); }; class PermuteBase { public: /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; }; class NoPermute : public PermuteBase { public: // // Methods // /// Constructor from matrix extent CUTLASS_HOST_DEVICE NoPermute(MatrixCoord extent, Index stride) { }; /// Constructor from pitch-linear extent CUTLASS_HOST_DEVICE NoPermute(PitchLinearCoord extent, Index stride) { }; /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { return 0; } // not correct but should never be called /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return 0; } // not correct but should never be called }; template<> struct InversePermute<NoPermute> { using type = NoPermute; }; /// Helper trait to detect if permute operation is a noop template<typename Permute> inline bool constexpr is_trivial_permute = platform::is_same<Permute, cutlass::layout::NoPermute>::value; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Defines permute layouts of various tensor formats. // ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // Tensor4DPermute0213 ///////////////////////////////////////////////////////////////////////////////////////////////// /// Permute layout function for 4-D permuted tensors with matrix (dimensions [M, N]) reshaped /// as [M/D1, D1, D2, N/D2]. Then perform permute([0, 2, 1, 3]) on the corresponding tensor. 
template <int D1, int D2> class Tensor4DPermute0213RowMajor : public PermuteBase { private: // // Data members // Index D3_; Index stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermute0213RowMajor(MatrixCoord extent, Index stride) { assert(extent.row() % D1 == 0); assert(extent.column() % D2 == 0); D3_ = extent.column() / D2; stride_ = stride * D1 / D2; } /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermute0213RowMajor(PitchLinearCoord extent, Index stride) : Tensor4DPermute0213RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { // [i,j,k,l] -> [i,k,j,l] Index l = coord.column() % D3_; Index k = coord.column() / D3_; Index j = coord.row() % D1; Index i = coord.row() / D1; MatrixCoord permuted{k + i * D2, l + j * D3_}; return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column()); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.strided(), coord.contiguous())); } }; // Inverse for Tensor4DPermute0213 can be implemented by simply swapping D1 and D2 template <int D1, int D2> class Tensor4DPermute0213RowMajorInverse : public Tensor4DPermute0213RowMajor<D2, D1> { public: using Base = Tensor4DPermute0213RowMajor<D2, D1>; using Base::Base; }; template<int D1, int D2> struct InversePermute<Tensor4DPermute0213RowMajor<D1, D2>> { using type = Tensor4DPermute0213RowMajorInverse<D1, D2>; }; template<int D1, int D2> struct InversePermute<Tensor4DPermute0213RowMajorInverse<D1, D2>> { using type = Tensor4DPermute0213RowMajor<D1, D2>; }; /// Permute layout function for 4-D permuted tensors with matrix (dimensions [M, N]) reshaped /// as [M/D1, D1, D2, N/D2]. Then perform permute([0, 2, 1, 3]) on the corresponding tensor. 
template <int D1, int D2> class Tensor4DPermute0213ColumnMajor : public PermuteBase { private: // // Data members // Index D0_; Index stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermute0213ColumnMajor(MatrixCoord extent, Index stride) { assert(extent.row() % D1 == 0); assert(extent.column() % D2 == 0); D0_ = extent.row() / D1; stride_ = stride * D2 / D1; } /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermute0213ColumnMajor(PitchLinearCoord extent, Index stride) : Tensor4DPermute0213ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { // [i,j,k,l] -> [i,k,j,l] Index l = coord.column() / D2; Index k = coord.column() % D2; Index j = coord.row() / D0_; Index i = coord.row() % D0_; MatrixCoord permuted{i + k * D0_, j + l * D1}; return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.contiguous(), coord.strided())); } }; // Inverse for Tensor4DPermute0213 can be implemented by simply swapping D1 and D2 template <int D1, int D2> class Tensor4DPermute0213ColumnMajorInverse : public Tensor4DPermute0213ColumnMajor<D2, D1> { public: using Base = Tensor4DPermute0213ColumnMajor<D2, D1>; using Base::Base; }; template<int D1, int D2> struct InversePermute<Tensor4DPermute0213ColumnMajor<D1, D2>> { using type = Tensor4DPermute0213ColumnMajorInverse<D1, D2>; }; template<int D1, int D2> struct InversePermute<Tensor4DPermute0213ColumnMajorInverse<D1, D2>> { using type = Tensor4DPermute0213ColumnMajor<D1, D2>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Tensor4DPermuteBMM0213 ///////////////////////////////////////////////////////////////////////////////////////////////// /// Permute layout function for 4-D permuted tensors for BMM with BMM tensor (dimensions [B, M, N]) reshaped /// as [B/D1, D1, M, N]. Then perform permute([0, 2, 1, 3]) on the corresponding whole BMM tensor. 
template <int D1> class Tensor4DPermuteBMM0213RowMajor : public PermuteBase { private: // // Data members // Index D3_; Index stride_; Index batch_stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermuteBMM0213RowMajor(MatrixCoord extent, Index stride) { Index D2 = extent.row(); D3_ = extent.column(); stride_ = stride * D1; batch_stride_ = D2 * stride_; } /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermuteBMM0213RowMajor(PitchLinearCoord extent, Index stride) : Tensor4DPermuteBMM0213RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { // The batch index for BMM Index BMM_batch_idx = blockIdx.z; // [i,j,k,l] -> [i,k,j,l] Index l = coord.column(); Index k = coord.row(); Index j = BMM_batch_idx % D1; Index i = BMM_batch_idx / D1; Index pbatch = i; MatrixCoord pcoord{k, l + j * D3_}; return pbatch * LongIndex(batch_stride_) + pcoord.row() * LongIndex(stride_) + pcoord.column(); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.strided(), coord.contiguous())); } }; template <int D1> class Tensor4DPermuteBMM0213RowMajorInverse : public PermuteBase { private: // // Data members // Index D3_; Index stride_; Index batch_stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermuteBMM0213RowMajorInverse(MatrixCoord extent, Index stride) { assert(extent.column() % D1 == 0); Index D2 = extent.row(); D3_ = extent.column() / D1; stride_ = stride / D1; batch_stride_ = D2 * stride_; } /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermuteBMM0213RowMajorInverse(PitchLinearCoord extent, Index stride) : Tensor4DPermuteBMM0213RowMajorInverse(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { // The batch index for BMM Index BMM_batch_idx = blockIdx.z; // The following assumes grouping [(D0)->batch, (D2)->row, (D1,D3)->col] Index l = coord.column() % D3_; Index j = coord.column() / D3_; Index k = coord.row(); Index i = BMM_batch_idx; // compute original [batch, row, col] index Index pbatch = j + i * D1; MatrixCoord pcoord{k, l}; return pbatch * LongIndex(batch_stride_) + pcoord.row() * LongIndex(stride_) + pcoord.column(); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.strided(), coord.contiguous())); } }; template<int D1> struct InversePermute<Tensor4DPermuteBMM0213RowMajor<D1>> { using type = Tensor4DPermuteBMM0213RowMajorInverse<D1>; }; template<int D1> struct InversePermute<Tensor4DPermuteBMM0213RowMajorInverse<D1>> { using type = Tensor4DPermuteBMM0213RowMajor<D1>; }; /// Permute layout function for 4-D permuted tensors for BMM with BMM tensor (dimensions [B, M, N]) reshaped /// as [B/D1, D1, M, N]. Then perform permute([0, 3, 2, 1]) on the corresponding whole BMM tensor. 
template <int D1> class Tensor4DPermuteBMM0321ColumnMajor : public PermuteBase { private: // // Data members // Index D2_; Index stride_; Index batch_stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermuteBMM0321ColumnMajor(MatrixCoord extent, Index stride) { D2_ = extent.row(); Index D3 = extent.column(); stride_ = stride * D1; batch_stride_ = stride_ * D3; } /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermuteBMM0321ColumnMajor(PitchLinearCoord extent, Index stride) : Tensor4DPermuteBMM0321ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { Index BMM_batch_idx = blockIdx.z; // [i,j,k,l] -> [i,k,j,l] Index l = coord.column(); Index k = coord.row(); Index j = BMM_batch_idx % D1; Index i = BMM_batch_idx / D1; Index pbatch = i; MatrixCoord pcoord{k + j * D2_, l}; return pbatch * LongIndex(batch_stride_) + pcoord.row() + pcoord.column() * LongIndex(stride_); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.contiguous(), coord.strided())); } }; template <int D1> class Tensor4DPermuteBMM0321ColumnMajorInverse : public PermuteBase { private: // // Data members // Index D2_; Index stride_; Index batch_stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermuteBMM0321ColumnMajorInverse(MatrixCoord extent, Index stride) { assert(extent.row() % D1 == 0); D2_ = extent.row() / D1; Index D3 = extent.column(); stride_ = stride / D1; batch_stride_ = stride_ * D3; } /// Constructor CUTLASS_HOST_DEVICE Tensor4DPermuteBMM0321ColumnMajorInverse(PitchLinearCoord extent, Index stride) : Tensor4DPermuteBMM0321ColumnMajorInverse(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { Index BMM_batch_idx = blockIdx.z; // The following assumes grouping [(D0)->batch, (D1,D2)->row, (D3)->col] Index l = coord.column(); Index k = coord.row() % D2_; Index j = coord.row() / D2_; Index i = BMM_batch_idx; Index pbatch = i * D1 + j; MatrixCoord pcoord{k, l}; return pbatch * LongIndex(batch_stride_) + pcoord.row() + pcoord.column() * LongIndex(stride_); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.contiguous(), coord.strided())); } }; template<int D1> struct InversePermute<Tensor4DPermuteBMM0321ColumnMajor<D1>> { using type = Tensor4DPermuteBMM0321ColumnMajorInverse<D1>; }; template<int D1> struct InversePermute<Tensor4DPermuteBMM0321ColumnMajorInverse<D1>> { using type = Tensor4DPermuteBMM0321ColumnMajor<D1>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Tensor5DPermute20314 ///////////////////////////////////////////////////////////////////////////////////////////////// /// Permute layout function for 5-D permuted tensors with output matrix (dimension as [M, N]) reshaped /// as [M/T1, T1, T2, T3, N/T2/T3]. Then perform permute([2, 0, 3, 1, 4]) on the corresponding output tensor. 
template <int T1, int T2, int T3> class Tensor5DPermute20314RowMajor : public PermuteBase { private: // // Data members // Index T0_; Index T4_; Index stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor5DPermute20314RowMajor(MatrixCoord extent, Index stride) { assert(extent.row() % T1 == 0); assert(extent.column() % (T2 * T3) == 0); T0_ = extent.row() / T1; T4_ = extent.column() / (T2 * T3); /// Update stride_permute with stride stride_ = stride / T2 * T1; // stride in Elements } /// Constructor CUTLASS_HOST_DEVICE Tensor5DPermute20314RowMajor(PitchLinearCoord extent, Index stride) : Tensor5DPermute20314RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { // Permute as torch.permute(X1, [2, 0, 3, 1, 4]) -> 5D Tensor indices as [i,j,k,l,m], the dimension of X // is [T0, T1, T2, T3, T4], after permutation the dim of X1 is [T2, T0, T3, T1, T4]. Index m = coord.column() % T4_; Index l = (coord.column() / T4_) % T3; Index k = (coord.column() / T4_) / T3; Index j = coord.row() % T1; Index i = coord.row() / T1; MatrixCoord permuted{i + k * T0_, m + j * T4_ + l * T1 * T4_}; return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column()); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.strided(), coord.contiguous())); } }; /// Inverse for Tensor5DPermute20314 (could also be given a proper name, e.g. Tensor5DPermute13024). template <int T1, int T2, int T3> class Tensor5DPermute20314RowMajorInverse : public PermuteBase { private: // // Data members // Index T0_; Index T4_; // Permuted stride in units of elements Index stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor5DPermute20314RowMajorInverse(MatrixCoord extent, Index stride) { assert(extent.row() % T2 == 0); assert(extent.column() % (T1 * T3) == 0); T0_ = extent.row() / T2; T4_ = extent.column() / (T1 * T3); stride_ = stride / T1 * T2; } /// Constructor CUTLASS_HOST_DEVICE Tensor5DPermute20314RowMajorInverse(PitchLinearCoord extent, Index stride) : Tensor5DPermute20314RowMajorInverse(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} /// Computes the offset after the inverse of permute operation in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { Index m = coord.column() % T4_; Index j = (coord.column() / T4_) % T1; Index l = (coord.column() / T4_) / T1; Index i = coord.row() % T0_; Index k = coord.row() / T0_; MatrixCoord permuted{j + i * T1, m + l * T4_ + k * T3 * T4_}; return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column()); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.strided(), coord.contiguous())); } }; template<int T1, int T2, int T3> struct InversePermute<Tensor5DPermute20314RowMajor<T1, T2, T3>> { using type = Tensor5DPermute20314RowMajorInverse<T1, T2, T3>; }; template<int T1, int T2, int T3> struct InversePermute<Tensor5DPermute20314RowMajorInverse<T1, T2, T3>> { using type = Tensor5DPermute20314RowMajor<T1, T2, T3>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Tensor5DPermute02413 
///////////////////////////////////////////////////////////////////////////////////////////////// /// Permute layout function for 5-D permuted tensors with matrix (dimensions [M, N]) reshaped /// as [M/T1, T1, T2, T3, N/T2/T3]. Then perform permute([0, 2, 4, 1, 3]) on the corresponding tensor. template <int T1, int T2, int T3> class Tensor5DPermute02413ColumnMajor : public PermuteBase { private: // // Data members // Index T0_; Index T4_; Index stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor5DPermute02413ColumnMajor(MatrixCoord extent, Index stride) { assert(extent.row() % T1 == 0); assert(extent.column() % (T2 * T3) == 0); T0_ = extent.row() / T1; T4_ = extent.column() / (T2 * T3); /// Update stride_permute with stride stride_ = stride / T1 * T2; // stride in Elements } /// Constructor CUTLASS_HOST_DEVICE Tensor5DPermute02413ColumnMajor(PitchLinearCoord extent, Index stride) : Tensor5DPermute02413ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { // Permute as torch.permute(X1, [2, 0, 3, 1, 4]) -> 5D Tensor indices as [i,j,k,l,m], the dimension of X // is [T0, T1, T2, T3, T4], after permutation the dim of X1 is [T0, T2, T4, T1, T3]. Index m = (coord.column() / T2) / T3; Index l = (coord.column() / T2) % T3; Index k = coord.column() % T2; Index j = coord.row() / T0_; Index i = coord.row() % T0_; MatrixCoord permuted{i + k * T0_, m + j * T4_ + l * T4_ * T1}; return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.contiguous(), coord.strided())); } }; /// Inverse for Tensor5DPermute02413ColumnMajor template <int T1, int T2, int T3> class Tensor5DPermute02413ColumnMajorInverse : public PermuteBase { private: // // Data members // Index T0_; Index T4_; // Permuted stride in units of elements Index stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE Tensor5DPermute02413ColumnMajorInverse(MatrixCoord extent, Index stride) { assert(extent.row() % T2 == 0); assert(extent.column() % (T1 * T3) == 0); T0_ = extent.row() / T2; T4_ = extent.column() / (T1 * T3); stride_ = stride / T2 * T1; } /// Constructor CUTLASS_HOST_DEVICE Tensor5DPermute02413ColumnMajorInverse(PitchLinearCoord extent, Index stride) : Tensor5DPermute02413ColumnMajorInverse(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} /// Computes the offset after the inverse of permute operation in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(MatrixCoord coord) const { Index m = coord.column() % T4_; Index j = (coord.column() / T4_) % T1; Index l = (coord.column() / T4_) / T1; Index i = coord.row() % T0_; Index k = coord.row() / T0_; MatrixCoord permuted{i + j * T0_, k + l * T2 + m * T2 * T3}; return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_); } /// Computes the offset after Permute Op in logical elements CUTLASS_HOST_DEVICE LongIndex operator()(PitchLinearCoord coord) const { return operator()(MatrixCoord(coord.contiguous(), coord.strided())); } }; template<int T1, int T2, int T3> struct InversePermute<Tensor5DPermute02413ColumnMajor<T1, T2, T3>> { using type = Tensor5DPermute02413ColumnMajorInverse<T1, T2, T3>; }; template<int T1, int T2, int T3> struct 
InversePermute<Tensor5DPermute02413ColumnMajorInverse<T1, T2, T3>> { using type = Tensor5DPermute02413ColumnMajor<T1, T2, T3>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace layout } // namespace cutlass
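// ---------------------------------------------------------------------------------
// Illustrative standalone check (not part of the header above): the index
// arithmetic of Tensor4DPermute0213RowMajor re-modelled as a free function and
// verified against a plain-loop reference permutation. The matrix extents and the
// factors D1/D2 below are arbitrary example values; the function assumes a compact
// row-major [M, N] source, i.e. stride == N, so stride_ == N * D1 / D2.
// ---------------------------------------------------------------------------------
#include <cassert>
#include <cstdio>
#include <vector>

// Mirror of Tensor4DPermute0213RowMajor::operator() for stride == N.
long permuted_offset(int row, int col, int N, int D1, int D2) {
  const int  D3      = N / D2;
  const long stride_ = static_cast<long>(N) * D1 / D2;
  int l = col % D3, k = col / D3, j = row % D1, i = row / D1;
  return (k + static_cast<long>(i) * D2) * stride_ + (l + static_cast<long>(j) * D3);
}

int main() {
  const int M = 4, N = 6, D1 = 2, D2 = 3;   // M % D1 == 0 and N % D2 == 0
  const int D0 = M / D1, D3 = N / D2;

  // Reference: reshape [M, N] -> [D0, D1, D2, D3], apply permute([0, 2, 1, 3])
  // with explicit loops, and record where each source offset lands.
  std::vector<int> dst(M * N, -1);
  for (int i = 0; i < D0; ++i)
    for (int j = 0; j < D1; ++j)
      for (int k = 0; k < D2; ++k)
        for (int l = 0; l < D3; ++l) {
          int src_offset = (i * D1 + j) * N + (k * D3 + l);
          int dst_offset = ((i * D2 + k) * D1 + j) * D3 + l;
          dst[dst_offset] = src_offset;
        }

  // Every source coordinate must land exactly where the class formula places it.
  for (int row = 0; row < M; ++row)
    for (int col = 0; col < N; ++col)
      assert(dst[permuted_offset(row, col, N, D1, D2)] == row * N + col);

  std::printf("Tensor4DPermute0213RowMajor index model verified for M=%d, N=%d\n", M, N);
  return 0;
}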
cutlass/include/cutlass/layout/permute.h/0
{ "file_path": "cutlass/include/cutlass/layout/permute.h", "repo_id": "cutlass", "token_count": 8996 }
40
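// ---------------------------------------------------------------------------------
// Illustrative standalone check (not part of the header above): a host-side model of
// Tensor4DPermuteBMM0213RowMajor, with the CUDA batch index blockIdx.z replaced by an
// explicit `batch` argument so the sketch runs on the host. It assumes a compact
// row-major batched tensor [B, M, N] (stride == N), giving stride_ == N * D1 and
// batch_stride_ == M * N * D1; B, M, N and D1 below are arbitrary example values.
// ---------------------------------------------------------------------------------
#include <cassert>
#include <cstdio>
#include <vector>

// Mirror of Tensor4DPermuteBMM0213RowMajor::operator() with an explicit batch index.
long bmm_permuted_offset(int batch, int row, int col, int M, int N, int D1) {
  const long stride_       = static_cast<long>(N) * D1;
  const long batch_stride_ = static_cast<long>(M) * stride_;
  int j = batch % D1, i = batch / D1;
  return i * batch_stride_ + row * stride_ + (col + static_cast<long>(j) * N);
}

int main() {
  const int B = 4, M = 3, N = 5, D1 = 2;    // B % D1 == 0
  const int D0 = B / D1;

  // Reference: reshape [B, M, N] -> [D0, D1, M, N], apply permute([0, 2, 1, 3])
  // with explicit loops, giving a [D0, M, D1, N] tensor flattened row-major.
  std::vector<int> dst(B * M * N, -1);
  for (int i = 0; i < D0; ++i)
    for (int j = 0; j < D1; ++j)
      for (int k = 0; k < M; ++k)
        for (int l = 0; l < N; ++l) {
          int src_offset = ((i * D1 + j) * M + k) * N + l;
          int dst_offset = ((i * M + k) * D1 + j) * N + l;
          dst[dst_offset] = src_offset;
        }

  // The class formula must agree with the reference for every (batch, row, col).
  for (int b = 0; b < B; ++b)
    for (int k = 0; k < M; ++k)
      for (int l = 0; l < N; ++l)
        assert(dst[bmm_permuted_offset(b, k, l, M, N, D1)] == (b * M + k) * N + l);

  std::printf("Tensor4DPermuteBMM0213RowMajor index model verified for B=%d, M=%d, N=%d\n",
              B, M, N);
  return 0;
}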