Dataset schema: text (string, length 27 to 947k), id (string, length 18 to 126), metadata (dict), __index_level_0__ (int64, 0 to 80)
get_filename_component(NvidiaCutlass_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)

include(CMakeFindDependencyMacro)

if(TARGET nvidia::cutlass::CUTLASS)
  return()
endif()

include("${NvidiaCutlass_CMAKE_DIR}/NvidiaCutlassTargets.cmake")
cutlass/cmake/NvidiaCutlassConfig.cmake.in/0
{ "file_path": "cutlass/cmake/NvidiaCutlassConfig.cmake.in", "repo_id": "cutlass", "token_count": 97 }
0
var searchData=
[
  ['quiet_5fnan',['quiet_NaN',['../structstd_1_1numeric__limits_3_01cutlass_1_1half__t_01_4.html#a8c7eafdd3b121353c0914dc6e1c0d108',1,'std::numeric_limits< cutlass::half_t >']]]
];
cutlass/docs/search/all_10.js/0
{ "file_path": "cutlass/docs/search/all_10.js", "repo_id": "cutlass", "token_count": 103 }
1
var searchData=
[
  ['yes',['yes',['../structcutlass_1_1platform_1_1is__base__of__helper.html#ac1cf3f804e7686213fd42c678cc6d669',1,'cutlass::platform::is_base_of_helper']]]
];
cutlass/docs/search/all_18.js/0
{ "file_path": "cutlass/docs/search/all_18.js", "repo_id": "cutlass", "token_count": 79 }
2
var searchData= [ ['arch',['arch',['../namespacecutlass_1_1arch.html',1,'cutlass']]], ['cutlass',['cutlass',['../namespacecutlass.html',1,'']]], ['debug',['debug',['../namespacecutlass_1_1debug.html',1,'cutlass']]], ['detail',['detail',['../namespacecutlass_1_1detail.html',1,'cutlass']]], ['detail',['detail',['../namespacecutlass_1_1gemm_1_1thread_1_1detail.html',1,'cutlass::gemm::thread']]], ['detail',['detail',['../namespacecutlass_1_1reference_1_1host_1_1detail.html',1,'cutlass::reference::host']]], ['detail',['detail',['../namespacecutlass_1_1epilogue_1_1threadblock_1_1detail.html',1,'cutlass::epilogue::threadblock']]], ['detail',['detail',['../namespacecutlass_1_1gemm_1_1threadblock_1_1detail.html',1,'cutlass::gemm::threadblock']]], ['detail',['detail',['../namespacecutlass_1_1reference_1_1detail.html',1,'cutlass::reference']]], ['detail',['detail',['../namespacecutlass_1_1gemm_1_1kernel_1_1detail.html',1,'cutlass::gemm::kernel']]], ['detail',['detail',['../namespacecutlass_1_1reference_1_1device_1_1detail.html',1,'cutlass::reference::device']]], ['detail',['detail',['../namespacecutlass_1_1reference_1_1device_1_1kernel_1_1detail.html',1,'cutlass::reference::device::kernel']]], ['device',['device',['../namespacecutlass_1_1gemm_1_1device.html',1,'cutlass::gemm']]], ['device',['device',['../namespacecutlass_1_1reference_1_1device.html',1,'cutlass::reference']]], ['device_5fmemory',['device_memory',['../namespacecutlass_1_1device__memory.html',1,'cutlass']]], ['epilogue',['epilogue',['../namespacecutlass_1_1epilogue.html',1,'cutlass']]], ['gemm',['gemm',['../namespacecutlass_1_1gemm.html',1,'cutlass']]], ['host',['host',['../namespacecutlass_1_1reference_1_1host.html',1,'cutlass::reference']]], ['kernel',['kernel',['../namespacecutlass_1_1reduction_1_1kernel.html',1,'cutlass::reduction']]], ['kernel',['kernel',['../namespacecutlass_1_1gemm_1_1kernel.html',1,'cutlass::gemm']]], ['kernel',['kernel',['../namespacecutlass_1_1reference_1_1device_1_1kernel.html',1,'cutlass::reference::device']]], ['layout',['layout',['../namespacecutlass_1_1layout.html',1,'cutlass']]], ['library',['library',['../namespacecutlass_1_1library.html',1,'cutlass']]], ['platform',['platform',['../namespacecutlass_1_1platform.html',1,'cutlass']]], ['reduction',['reduction',['../namespacecutlass_1_1reduction.html',1,'cutlass']]], ['reference',['reference',['../namespacecutlass_1_1reference.html',1,'cutlass']]], ['thread',['thread',['../namespacecutlass_1_1gemm_1_1thread.html',1,'cutlass::gemm']]], ['thread',['thread',['../namespacecutlass_1_1reference_1_1device_1_1thread.html',1,'cutlass::reference::device']]], ['thread',['thread',['../namespacecutlass_1_1thread.html',1,'cutlass']]], ['thread',['thread',['../namespacecutlass_1_1reduction_1_1thread.html',1,'cutlass::reduction']]], ['thread',['thread',['../namespacecutlass_1_1epilogue_1_1thread.html',1,'cutlass::epilogue']]], ['thread',['thread',['../namespacecutlass_1_1transform_1_1thread.html',1,'cutlass::transform']]], ['threadblock',['threadblock',['../namespacecutlass_1_1epilogue_1_1threadblock.html',1,'cutlass::epilogue']]], ['threadblock',['threadblock',['../namespacecutlass_1_1gemm_1_1threadblock.html',1,'cutlass::gemm']]], ['threadblock',['threadblock',['../namespacecutlass_1_1transform_1_1threadblock.html',1,'cutlass::transform']]], ['transform',['transform',['../namespacecutlass_1_1transform.html',1,'cutlass']]], ['warp',['warp',['../namespacecutlass_1_1epilogue_1_1warp.html',1,'cutlass::epilogue']]], 
['warp',['warp',['../namespacecutlass_1_1gemm_1_1warp.html',1,'cutlass::gemm']]] ];
cutlass/docs/search/namespaces_0.js/0
{ "file_path": "cutlass/docs/search/namespaces_0.js", "repo_id": "cutlass", "token_count": 1418 }
3
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example demonstrates how to use the PredicatedTileIterator in CUTLASS to load data from addressable memory, and then store it back into addressable memory. TileIterator is a core concept in CUTLASS that enables efficient loading and storing of data to and from addressable memory. The PredicateTileIterator accepts a ThreadMap type, which defines the mapping of threads to a "tile" in memory. This separation of concerns enables user-defined thread mappings to be specified. In this example, a PredicatedTileIterator is used to load elements from a tile in global memory, stored in column-major layout, into a fragment and then back into global memory in the same layout. This example uses CUTLASS utilities to ease the matrix operations. */ // Standard Library includes #include <iostream> #include <sstream> #include <vector> // CUTLASS includes #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/transform/pitch_linear_thread_map.h" // // CUTLASS utility includes // // Defines operator<<() to write TensorView objects to std::ostream #include "cutlass/util/tensor_view_io.h" // Defines cutlass::HostTensor<> #include "cutlass/util/host_tensor.h" // Defines cutlass::reference::host::TensorFill() and // cutlass::reference::host::TensorFillBlockSequential() #include "cutlass/util/reference/host/tensor_fill.h" #pragma warning( disable : 4503) /////////////////////////////////////////////////////////////////////////////////////////////////// /// Define PredicatedTileIterators to load and store a M-by-K tile, in column major layout. 
template <typename Iterator> __global__ void copy( typename Iterator::Params dst_params, typename Iterator::Element *dst_pointer, typename Iterator::Params src_params, typename Iterator::Element *src_pointer, cutlass::Coord<2> extent) { Iterator dst_iterator(dst_params, dst_pointer, extent, threadIdx.x); Iterator src_iterator(src_params, src_pointer, extent, threadIdx.x); // PredicatedTileIterator uses PitchLinear layout and therefore takes in a PitchLinearShape. // The contiguous dimension can be accessed via Iterator::Shape::kContiguous and the strided // dimension can be accessed via Iterator::Shape::kStrided int iterations = (extent[1] + Iterator::Shape::kStrided - 1) / Iterator::Shape::kStrided; typename Iterator::Fragment fragment; for(size_t i = 0; i < fragment.size(); ++i) { fragment[i] = 0; } src_iterator.load(fragment); dst_iterator.store(fragment); ++src_iterator; ++dst_iterator; for(; iterations > 1; --iterations) { src_iterator.load(fragment); dst_iterator.store(fragment); ++src_iterator; ++dst_iterator; } } /////////////////////////////////////////////////////////////////////////////////////////////////// // Initializes the source tile with sequentially increasing values and performs the copy into // the destination tile using two PredicatedTileIterators, one to load the data from addressable // memory into a fragment (regiser-backed array of elements owned by each thread) and another to // store the data from the fragment back into the addressable memory of the destination tile. cudaError_t TestTileIterator(int M, int K) { // For this example, we chose a <64, 4> tile shape. The PredicateTileIterator expects // PitchLinearShape and PitchLinear layout. using Shape = cutlass::layout::PitchLinearShape<64, 4>; using Layout = cutlass::layout::PitchLinear; using Element = int; int const kThreads = 32; // ThreadMaps define how threads are mapped to a given tile. The PitchLinearStripminedThreadMap // stripmines a pitch-linear tile among a given number of threads, first along the contiguous // dimension then along the strided dimension. using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<Shape, kThreads>; // Define the PredicateTileIterator, using TileShape, Element, Layout, and ThreadMap types using Iterator = cutlass::transform::threadblock::PredicatedTileIterator< Shape, Element, Layout, 1, ThreadMap>; cutlass::Coord<2> copy_extent = cutlass::make_Coord(M, K); cutlass::Coord<2> alloc_extent = cutlass::make_Coord(M, K); // Allocate source and destination tensors cutlass::HostTensor<Element, Layout> src_tensor(alloc_extent); cutlass::HostTensor<Element, Layout> dst_tensor(alloc_extent); Element oob_value = Element(-1); // Initialize destination tensor with all -1s cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value); // Initialize source tensor with sequentially increasing values cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity()); dst_tensor.sync_device(); src_tensor.sync_device(); typename Iterator::Params dst_params(dst_tensor.layout()); typename Iterator::Params src_params(src_tensor.layout()); dim3 block(kThreads, 1); dim3 grid(1, 1); // Launch copy kernel to perform the copy copy<Iterator><<< grid, block >>>( dst_params, dst_tensor.device_data(), src_params, src_tensor.device_data(), copy_extent ); cudaError_t result = cudaGetLastError(); if(result != cudaSuccess) { std::cerr << "Error - kernel failed." 
<< std::endl; return result; } dst_tensor.sync_host(); // Verify results for(int s = 0; s < alloc_extent[1]; ++s) { for(int c = 0; c < alloc_extent[0]; ++c) { Element expected = Element(0); if(c < copy_extent[0] && s < copy_extent[1]) { expected = src_tensor.at({c, s}); } else { expected = oob_value; } Element got = dst_tensor.at({c, s}); bool equal = (expected == got); if(!equal) { std::cerr << "Error - source tile differs from destination tile." << std::endl; return cudaErrorUnknown; } } } return cudaSuccess; } int main(int argc, const char *arg[]) { cudaError_t result = TestTileIterator(57, 35); if(result == cudaSuccess) { std::cout << "Passed." << std::endl; } // Exit return result == cudaSuccess ? 0 : -1; }
cutlass/examples/04_tile_iterator/tile_iterator.cu/0
{ "file_path": "cutlass/examples/04_tile_iterator/tile_iterator.cu", "repo_id": "cutlass", "token_count": 2658 }
4
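The tile iterator example above relies on PitchLinearStripminedThreadMap to distribute the <64, 4> tile among 32 threads. The short host-only sketch below is an illustrative addition, assuming CUTLASS 2.x headers are on the include path; it prints the per-thread Iterations and Delta exposed by such a thread map (the names used for output are chosen here, not taken from the example).

#include <iostream>

#include "cutlass/layout/pitch_linear.h"
#include "cutlass/transform/pitch_linear_thread_map.h"

int main() {
  // Same tile shape and thread count as the example: a 64 x 4 pitch-linear tile
  // strip-mined among 32 threads.
  using Shape = cutlass::layout::PitchLinearShape<64, 4>;
  int const kThreads = 32;
  using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<Shape, kThreads>;

  // Iterations: how many accesses each thread performs along the contiguous and
  // strided dimensions; Delta: the element spacing between those accesses.
  std::cout << "Iterations: " << ThreadMap::Iterations::kContiguous
            << " x " << ThreadMap::Iterations::kStrided << "\n";
  std::cout << "Delta:      " << ThreadMap::Delta::kContiguous
            << " x " << ThreadMap::Delta::kStrided << "\n";
  return 0;
}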
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** */ #include <algorithm> #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // Note that if the output is column major, the bias has to be per row. i.e. every row has different bias. // If the output is row major, the bias has to be per column, i.e. every column has different bias. // Below list some other notices: // // Note this example only works for ColumnMajor output because // 1) we only have row major epilogue. // 2) we swap A and B if the output is column major then we can still use the // row major epilogue. // 3) Mx1 bias vector becomes 1xM after the swapping/transposing. // 4) we can use the existing OutputIterator to load 1xM bias vector. 
using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::ColumnMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // Define the epilogue operation as LinearCombinationRelu. This is approximately equal to // // d_ij = max(0, alpha * sum_k(a_ik * b_kj) + c_ij ) // using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per // vectorized memory access. For half // precision, it's 8 elements. This becomes // the vector width of math instructions in // epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue, // <- data type for alpha in linear combination function cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // <- alpha x C + bias // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias( {problem_size.m(), 1}); // <- Create matrix C with dimensions M x 1 cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data 
cutlass::reference::host::TensorFillRandomUniform( tensor_c_bias.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c_bias.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{ problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device {tensor_c_bias.device_data(), 0}, // <- the C matrix is treated as the bias vector. We can enable the GEMM // to project away the N dimension by setting the stride to zero. tensor_d.device_ref(), // <- reference to matrix D on device {alpha}, // <- alpha split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference gemm kernel // cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device_reference; // Launch device reference to compute strictly the product A * B gemm_device_reference( problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), 0, tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Compute bias + relu in host code for (int i = 0; i < problem_size.m(); ++i) { for (int j = 0; j < problem_size.n(); ++j) { tensor_ref_d.at({i, j}) = std::max( ElementOutput(0), ElementOutput(tensor_ref_d.at({i, j}) + tensor_c_bias.at({i, 0})) ); } } // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main() { bool notSupported = false; // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. 
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
cutlass/examples/12_gemm_bias_relu/gemm_bias_relu.cu/0
{ "file_path": "cutlass/examples/12_gemm_bias_relu/gemm_bias_relu.cu", "repo_id": "cutlass", "token_count": 5210 }
5
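For reference, the epilogue configured in the example above (LinearCombinationRelu with ScaleType::NoBetaScaling and an M x 1 bias) computes d_ij = max(0, alpha * sum_k(a_ik * b_kj) + bias_i). The plain C++ sketch below is an illustrative, CUTLASS-free restatement of that math; the function and variable names are hypothetical.

#include <algorithm>
#include <vector>

// Reference GEMM + per-row bias + ReLU:
//   d_ij = max(0, alpha * sum_k(a_ik * b_kj) + bias_i)
// A is M x K, B is K x N, bias is M x 1, D is M x N; all row-major here for clarity.
void gemm_bias_relu_reference(int M, int N, int K, float alpha,
                              std::vector<float> const &A,
                              std::vector<float> const &B,
                              std::vector<float> const &bias,
                              std::vector<float> &D) {
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      float acc = 0.0f;
      for (int k = 0; k < K; ++k) {
        acc += A[i * K + k] * B[k * N + j];
      }
      D[i * N + j] = std::max(0.0f, alpha * acc + bias[i]);
    }
  }
}

int main() {
  int const M = 2, N = 2, K = 2;
  std::vector<float> A{1, 2, 3, 4}, B{1, 0, 0, 1}, bias{-10, 1}, D(M * N);
  gemm_bias_relu_reference(M, N, K, 1.0f, A, B, bias, D);
  // D = {max(0,1-10), max(0,2-10), max(0,3+1), max(0,4+1)} = {0, 0, 4, 5}
  return 0;
}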
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "device/b2b_implicit_gemm_convolution.h" #include "b2b_interleaved_conv2d_run.h" #include "test_run.h" //////////////////////////////////////////////////////////////////////////////// cutlass::conv::Conv2dProblemSize conv2d_s8_sm80_problem_size_0 ( {32, 56, 56, 64}, // input size (NHWC) {64, 3, 3, 64}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) {32, 56, 56, 64} // output size (NPQK) ); cutlass::conv::Conv2dProblemSize conv2d_s8_sm80_problem_size_1 ( {32, 56, 56, 64}, // input size (NHWC) {128, 1, 1, 64}, // filter size (KRSC) {0, 0, 0, 0}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) {32, 56, 56, 128} // output size (NPQK) ); bool run_nonfused_conv2d_fprop_optimized_s8_sm80() { using ElementA = int8_t; using ElementB = int8_t; using ElementC = int8_t; using ElementAccumulator = int32_t; using ElementCompute = float; ElementCompute alpha0 = ElementCompute(1); ElementCompute beta0 = ElementCompute(1); //beta=1 for bias ElementCompute alpha1 = ElementCompute(1); ElementCompute beta1 = ElementCompute(1); //beta=1 for bias using ThreadblockShape0 = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape0 = cutlass::gemm::GemmShape<64, 64, 64>; using ThreadblockShape1 = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape1 = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; using Conv2dFpropKernel0 = typename 
cutlass::conv::kernel::DefaultConv2dFprop< ElementA, cutlass::layout::TensorNCxHWx<32>, ElementB, cutlass::layout::TensorCxRSKx<32>, ElementC, cutlass::layout::TensorNCxHWx<32>, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, ThreadblockShape0, WarpShape0, InstructionShape, cutlass::epilogue::thread::LinearCombinationRelu< ElementC, 64 / cutlass::sizeof_bits<ElementC>::value, ElementAccumulator, ElementCompute, cutlass::epilogue::thread::ScaleType::NoBetaScaling >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>, 3, cutlass::arch::OpMultiplyAddSaturate, cutlass::conv::IteratorAlgorithm::kOptimized >::Kernel; using Conv2dFprop0 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel0>; using Conv2dFpropKernel1 = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementA, cutlass::layout::TensorNCxHWx<32>, ElementB, cutlass::layout::TensorCxRSKx<32>, ElementC, cutlass::layout::TensorNCxHWx<32>, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, ThreadblockShape1, WarpShape1, InstructionShape, cutlass::epilogue::thread::LinearCombinationRelu< ElementC, 64 / cutlass::sizeof_bits<ElementC>::value, ElementAccumulator, ElementCompute, cutlass::epilogue::thread::ScaleType::NoBetaScaling >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>, 3, cutlass::arch::OpMultiplyAddSaturate, cutlass::conv::IteratorAlgorithm::kOptimized >::Kernel; using Conv2dFprop1 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel1>; B2bInterleavedNonFusedConv2dRun<Conv2dFprop0, Conv2dFprop1, 32> nonFusedConv2d; std::cout << "Running Non-fused back-to-back INT8 interleaved Optimized Convolution Fprops...\n"; bool pass = nonFusedConv2d.run(conv2d_s8_sm80_problem_size_0, conv2d_s8_sm80_problem_size_1, cutlass::conv::SplitKMode::kSerial, alpha0, beta0, alpha1, beta1); if(pass) std::cout << "Pass\n"; else std::cout << "Fail\n"; return pass; } bool run_fused_conv2d_fprop_optimized_s8_sm80_rf_res() { using ElementA = int8_t; using ElementB = int8_t; using ElementC = int8_t; using ElementAccumulator = int32_t; using ElementCompute = float; ElementCompute alpha0 = ElementCompute(1); //Fused kernel has built-in bias, setting beta=0 ElementCompute beta0 = ElementCompute(0); ElementCompute alpha1 = ElementCompute(1); ElementCompute beta1 = ElementCompute(1); //beta=1 for bias using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape0 = cutlass::gemm::GemmShape<16, 64, 64>; using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 64>; using WarpShape1 = cutlass::gemm::GemmShape<16, 128, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; using EpilogueOutputOp0 = cutlass::epilogue::thread::LinearCombinationRelu< ElementC, 8 * InstructionShape::kN / 32, ElementAccumulator, ElementCompute, cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling >; using EpilogueOutputOp1 = cutlass::epilogue::thread::LinearCombinationRelu< ElementC, 64 / cutlass::sizeof_bits<ElementC>::value, ElementAccumulator, ElementCompute, cutlass::epilogue::thread::ScaleType::NoBetaScaling >; using B2bConv2dFpropKernel = typename cutlass::conv::kernel::DefaultB2bConv2dFprop< ElementA, cutlass::layout::TensorNCxHWx<32>, ElementB, cutlass::layout::TensorCxRSKx<32>, ElementC, cutlass::layout::TensorNCxHWx<32>, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>, 3, cutlass::arch::OpMultiplyAddSaturate, cutlass::conv::IteratorAlgorithm::kOptimized >::Kernel; using B2bConv2dFprop = cutlass::conv::device::B2bImplicitGemmConvolution<B2bConv2dFpropKernel>; B2bInterleavedFusedConv2dRun<B2bConv2dFprop, 32> fusedConv2d; std::cout << "Running Fused back-to-back INT8 interleaved Optimized Convolution Fprops with RF residency...\n"; bool pass = fusedConv2d.run(conv2d_s8_sm80_problem_size_0, conv2d_s8_sm80_problem_size_1, cutlass::conv::SplitKMode::kSerial, alpha0, beta0, alpha1, beta1); if(pass) std::cout << "Pass\n"; else std::cout << "Fail\n"; return pass; } int main() { std::vector<bool (*)()>funcs = { &run_nonfused_conv2d_fprop_optimized_s8_sm80, &run_fused_conv2d_fprop_optimized_s8_sm80_rf_res }; return testRun(80, funcs, "conv int8 RF residency"); } ////////////////////////////////////////////////////////////////////////////////
cutlass/examples/13_two_tensor_op_fusion/fused_two_convs_s8_sm80_rf.cu/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/fused_two_convs_s8_sm80_rf.cu", "repo_id": "cutlass", "token_count": 3356 }
6
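The two problem sizes above chain naturally: conv0 produces a 32x56x56x64 activation that conv1 consumes through 1x1 filters. The plain C++ sketch below (illustrative, with hypothetical names; the output-size formulas assume unit dilation) computes the implicit-GEMM dimensions of each convolution, showing that conv0's GEMM N dimension (64) is covered entirely by ThreadblockShape0 and becomes conv1's GEMM K dimension, which is what allows the intermediate tile to stay resident in registers in the fused kernel.

#include <iostream>

struct Conv2dSize { int n, h, w, c, k, r, s, pad, stride; };

void print_implicit_gemm(const char *name, Conv2dSize cs) {
  int p = (cs.h + 2 * cs.pad - cs.r) / cs.stride + 1;  // output height P
  int q = (cs.w + 2 * cs.pad - cs.s) / cs.stride + 1;  // output width Q
  std::cout << name
            << ": GEMM_M = " << cs.n * p * q            // N * P * Q
            << ", GEMM_N = " << cs.k                    // K
            << ", GEMM_K = " << cs.r * cs.s * cs.c      // R * S * C
            << "\n";
}

int main() {
  print_implicit_gemm("conv0", {32, 56, 56, 64,  64, 3, 3, 1, 1});  // -> 100352 x 64 x 576
  print_implicit_gemm("conv1", {32, 56, 56, 64, 128, 1, 1, 0, 1});  // -> 100352 x 128 x 64
  return 0;
}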
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/transform/threadblock/predicated_vector_access_iterator.h" #include "cutlass/transform/threadblock/vector_iterator.h" #include "cutlass/transform/warp/vector_fragment_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "kernel/default_b2b_conv2d_fprop.h" #include "kernel/b2b_implicit_gemm_convolution.h" #include "threadblock/b2b_implicit_gemm_multistage.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultB2bConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA; using IteratorA0 = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>, ElementA, LayoutA, ThreadMapA0 >; using SmemIteratorA0 = typename MmaCore0::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB; using IteratorB0 = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>, ElementB, LayoutB, ThreadMapB0 >; using SmemIteratorB0 = typename MmaCore0::SmemIteratorB; // Use fragment iterator for A operand using AccumulatorLayout = cutlass::layout::ColumnMajor; using FragmentIteratorA1 = cutlass::gemm::warp::MmaTensorOpFragmentIterator< cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape MmaCore1::Shape::kK, //kBlocksColumn ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>; /// Define iterators over tiles from scale/bias vectors using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter static int const kElementsPerAccess = 2; using IteratorAccumulatorScaleBias = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>, cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>, ElementScaleBias, LayoutScaleBias, kElementsPerAccess> >; // Warp-level iterators to load scale and bias vectors using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator< MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias, LayoutScaleBias, InstructionShape, kElementsPerAccess>; // Define iterators over tiles from the B operand using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB; using IteratorB1 = 
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>, ElementB, LayoutB, ThreadMapB1 >; using SmemIteratorB1 = typename MmaCore1::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp; using MmaPolicy0 = typename MmaCore0::MmaPolicy; using MmaPolicy1 = typename MmaCore1::MmaPolicy; // Define the Mma using B2bMma = threadblock::B2bImplicitGemmMultistage< ThreadblockShape0, IteratorA0, SmemIteratorA0, arch::CacheOperation::Always, IteratorB0, SmemIteratorB0, arch::CacheOperation::Global, ThreadblockShape1, FragmentIteratorA1, IteratorAccumulatorScaleBias, FragmentIteratorA1ScaleBias, IteratorB1, SmemIteratorB1, arch::CacheOperation::Global, EpilogueOutputOp0, MmaPolicy0, MmaPolicy1, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape1, WarpMmaTensorOp1, 1, EpilogueOutputOp1, EpilogueOutputOp1::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution< B2bMma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline with interleaved layout. template < typename ElementA, typename ElementB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int InterleavedK > struct DefaultB2bConv2dFprop < ElementA, layout::TensorNCxHWx<InterleavedK>, ElementB, layout::TensorCxRSKx<InterleavedK>, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, Stages, MathOperatorTag, true>; using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, Stages, MathOperatorTag, true>; // Define iterators over tiles from the A operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. 
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA; using IteratorA0 = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>, ElementA, layout::TensorNCxHWx<InterleavedK>, ThreadMapA0 >; using SmemIteratorA0 = typename MmaCore0::SmemIteratorA; // Define iterators over tiles from the B operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. using ThreadMapB0 = typename MmaCore0::SmemThreadMapB; using IteratorB0 = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB0 >; using SmemIteratorB0 = typename MmaCore0::SmemIteratorB; // Use fragment iterator for A operand using AccumulatorLayout = cutlass::layout::RowMajor; using FragmentIteratorA1 = cutlass::gemm::warp::MmaTensorOpFragmentIterator< cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape MmaCore1::Shape::kK, //kBlocksColumn ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>; /// Define iterators over tiles from scale/bias vectors using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter static int const kElementsPerAccess = 4; using IteratorAccumulatorScaleBias = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>, cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>, ElementScaleBias, LayoutScaleBias, kElementsPerAccess> >; // Warp-level iterators to load scale and bias vectors using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator< MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias, LayoutScaleBias, InstructionShape, kElementsPerAccess>; using ThreadMapB1 = typename MmaCore1::SmemThreadMapB; using IteratorB1 = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB1 >; using SmemIteratorB1 = typename MmaCore1::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp; using MmaPolicy0 = typename MmaCore0::MmaPolicy; using MmaPolicy1 = typename MmaCore1::MmaPolicy; // Define the Mma using B2bMma = threadblock::B2bImplicitGemmMultistage< ThreadblockShape0, IteratorA0, SmemIteratorA0, arch::CacheOperation::Always, IteratorB0, SmemIteratorB0, arch::CacheOperation::Global, ThreadblockShape1, FragmentIteratorA1, IteratorAccumulatorScaleBias, FragmentIteratorA1ScaleBias, IteratorB1, SmemIteratorB1, arch::CacheOperation::Global, EpilogueOutputOp0, MmaPolicy0, MmaPolicy1, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue< ThreadblockShape1, WarpMmaTensorOp1, 1, EpilogueOutputOp1, EpilogueOutputOp1::kCount, InterleavedK >::Epilogue; // Define the kernel using Kernel = 
cutlass::conv::kernel::B2bImplicitGemmConvolution< B2bMma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and /// multistage pipeline. template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultB2bConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA; using IteratorA0 = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>, ElementA, LayoutA, ThreadMapA0 >; using SmemIteratorA0 = typename MmaCore0::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB; using IteratorB0 = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>, ElementB, LayoutB, ThreadMapB0 >; using SmemIteratorB0 = typename MmaCore0::SmemIteratorB; // Use fragment iterator for A operand using AccumulatorLayout = cutlass::layout::ColumnMajor; using FragmentIteratorA1 = cutlass::gemm::warp::MmaTensorOpFragmentIterator< cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape MmaCore1::Shape::kK, //kBlocksColumn ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>; /// Define iterators over tiles from scale/bias vectors using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter static int const kElementsPerAccess = 2; using IteratorAccumulatorScaleBias = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>, cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>, ElementScaleBias, LayoutScaleBias, kElementsPerAccess> >; // Warp-level iterators to load scale and bias vectors using FragmentIteratorA1ScaleBias = 
cutlass::transform::warp::VectorFragmentIterator< MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias, LayoutScaleBias, InstructionShape, kElementsPerAccess>; // Define iterators over tiles from the B operand using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB; using IteratorB1 = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>, ElementB, LayoutB, ThreadMapB1 >; using SmemIteratorB1 = typename MmaCore1::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp; using MmaPolicy0 = typename MmaCore0::MmaPolicy; using MmaPolicy1 = typename MmaCore1::MmaPolicy; // Define the Mma using B2bMma = threadblock::B2bImplicitGemmMultistage< ThreadblockShape0, IteratorA0, SmemIteratorA0, arch::CacheOperation::Always, IteratorB0, SmemIteratorB0, arch::CacheOperation::Global, ThreadblockShape1, FragmentIteratorA1, IteratorAccumulatorScaleBias, FragmentIteratorA1ScaleBias, IteratorB1, SmemIteratorB1, arch::CacheOperation::Global, EpilogueOutputOp0, MmaPolicy0, MmaPolicy1, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape1, WarpMmaTensorOp1, 1, EpilogueOutputOp1, EpilogueOutputOp1::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution< B2bMma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimzed IteratorAlgorithm and // multistage pipeline with interleaved layout. template < typename ElementA, typename ElementB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int InterleavedK > struct DefaultB2bConv2dFprop < ElementA, layout::TensorNCxHWx<InterleavedK>, ElementB, layout::TensorCxRSKx<InterleavedK>, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, Stages, MathOperatorTag, true>; using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, Stages, MathOperatorTag, true>; // Define iterators over tiles from the A operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. 
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA; using IteratorA0 = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>, ElementA, layout::TensorNCxHWx<InterleavedK>, ThreadMapA0 >; using SmemIteratorA0 = typename MmaCore0::SmemIteratorA; // Define iterators over tiles from the B operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. using ThreadMapB0 = typename MmaCore0::SmemThreadMapB; using IteratorB0 = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB0 >; using SmemIteratorB0 = typename MmaCore0::SmemIteratorB; // Use fragment iterator for A operand using AccumulatorLayout = cutlass::layout::RowMajor; using FragmentIteratorA1 = cutlass::gemm::warp::MmaTensorOpFragmentIterator< cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape MmaCore1::Shape::kK, //kBlocksColumn ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>; /// Define iterators over tiles from scale/bias vectors using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter static int const kElementsPerAccess = 4; using IteratorAccumulatorScaleBias = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>, cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>, ElementScaleBias, LayoutScaleBias, kElementsPerAccess> >; // Warp-level iterators to load scale and bias vectors using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator< MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias, LayoutScaleBias, InstructionShape, kElementsPerAccess>; using ThreadMapB1 = typename MmaCore1::SmemThreadMapB; using IteratorB1 = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB1 >; using SmemIteratorB1 = typename MmaCore1::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp; using MmaPolicy0 = typename MmaCore0::MmaPolicy; using MmaPolicy1 = typename MmaCore1::MmaPolicy; // Define the Mma using B2bMma = threadblock::B2bImplicitGemmMultistage< ThreadblockShape0, IteratorA0, SmemIteratorA0, arch::CacheOperation::Always, IteratorB0, SmemIteratorB0, arch::CacheOperation::Global, ThreadblockShape1, FragmentIteratorA1, IteratorAccumulatorScaleBias, FragmentIteratorA1ScaleBias, IteratorB1, SmemIteratorB1, arch::CacheOperation::Global, EpilogueOutputOp0, MmaPolicy0, MmaPolicy1, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue< ThreadblockShape1, WarpMmaTensorOp1, 1, EpilogueOutputOp1, EpilogueOutputOp1::kCount, InterleavedK >::Epilogue; // Define the kernel using Kernel = 
cutlass::conv::kernel::B2bImplicitGemmConvolution< B2bMma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_sm80.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_sm80.h", "repo_id": "cutlass", "token_count": 9245 }
7
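The epilogue output operators used by these B2B kernels size their vectorized accesses as a fixed access width in bits divided by the element width, e.g. 64 / cutlass::sizeof_bits<int8_t>::value = 8 elements per access in the interleaved INT8 examples. The host-only sketch below (illustrative, assuming CUTLASS headers are available on the include path) checks that arithmetic at compile time.

#include "cutlass/numeric_types.h"

// Elements per vectorized access = access width in bits / element width in bits.
static_assert(64 / cutlass::sizeof_bits<int8_t>::value == 8,
              "a 64-bit access holds 8 int8 elements");
static_assert(128 / cutlass::sizeof_bits<cutlass::half_t>::value == 8,
              "a 128-bit access holds 8 half_t elements");
static_assert(128 / cutlass::sizeof_bits<float>::value == 4,
              "a 128-bit access holds 4 float elements");

int main() { return 0; }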
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped Back-to-back fused GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/aligned_buffer.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "threadblock/b2b_mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape0_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) typename IteratorA0_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA0_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) typename IteratorB0_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB0_, /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape1_, /// Iterates over the intermediate accumulator tile // (concept::MmaTensorOpFragmentIterator) typename FragmentIteratorA1_, /// Iterates over vectors of scale and bias vector in global memory // (concept: VectorIterator) typename IteratorAccumulatorScaleBias_, /// FragmentIterator to load Scale or Bias vector from threadblock fragment typename FragmentIteratorA1ScaleBias_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) typename IteratorB1_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB1_, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...) typename OutputOp_, /// Policy describing tuning details (concept: MmaPipelinedPolicy) typename Policy0_, /// Policy describing tuning details (concept: MmaPipelinedPolicy) typename Policy1_, /// Transformation applied to A0 operand typename TransformA0_ = NumericArrayConverter< typename SmemIteratorA0_::Element, typename IteratorA0_::Element, IteratorA0_::Fragment::kElements>, /// /// Transformation applied to B0 operand typename TransformB0_ = NumericArrayConverter< typename SmemIteratorB0_::Element, typename IteratorB0_::Element, IteratorB0_::Fragment::kElements>, /// /// Transformation applied to B1 operand typename TransformB1_ = NumericArrayConverter< typename SmemIteratorB1_::Element, typename IteratorB1_::Element, IteratorB1_::Fragment::kElements>, /// Used for partial specialization typename Enable = bool > class B2bMmaPipelined : public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, 2> { public: ///< Base class using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, 2>; using Shape0 = Shape0_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using IteratorA0 = IteratorA0_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA0; using IteratorB0 = IteratorB0_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB0; using Policy0 = Policy0_; ///< Policy describing tuning details using SmemIteratorA0 = SmemIteratorA0_; using SmemIteratorB0 = SmemIteratorB0_; using Shape1 = Shape1_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using FragmentIteratorA1 = FragmentIteratorA1_; ///< Iterates over intermediate accumulator tile using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_; ///< Iterates over tiles of the scale and bias vectors in global memory using FragmentIteratorA1ScaleBias = FragmentIteratorA1ScaleBias_; ///< WarpIterator to load Scale or Bias vector from the threadblock 
fragment using IteratorB1 = IteratorB1_; ///< Iterates over tiles of B operand in global memory using Policy1 = Policy1_; ///< Policy describing tuning details using Policy = Policy1; ///< Export Policy1 as the threadblock-level Mma's policy using Shape = Shape1; using SmemIteratorB1 = SmemIteratorB1_; using ElementC = ElementC_; ///< Data type of accumulator matrix using LayoutC = LayoutC_; ///< Layout of accumulator matrix using OutputOp = OutputOp_; ///< Epilogue after 1st Gemm static const bool PerChannelScale = (OutputOp::kScale == epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling); using TransformA0 = TransformA0_; using TransformB0 = TransformB0_; using TransformB1 = TransformB1_; // // Dependent types // /// Fragment of operand A loaded from global memory using FragmentA0 = typename IteratorA0::Fragment; /// Fragment of operand B loaded from global memory using FragmentB0 = typename IteratorB0::Fragment; /// Fragment of accumulator tile using FragmentC0 = typename Policy0::Operator::FragmentC; /// Warp-level Mma using Operator0 = typename Policy0::Operator; /// Fragment of Scale and Bias loaded from global memory using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment; /// Fragment of operand B loaded from global memory using FragmentB1 = typename IteratorB1::Fragment; /// Fragment of accumulator tile using FragmentC1 = typename Policy1::Operator::FragmentC; /// Warp-level Mma using Operator1 = typename Policy1::Operator; /// Obtain the arch tag from the warp-level operator using ArchTag = typename Policy0::Operator::ArchTag; /// Complex transform on A0 operand static ComplexTransform const kTransformA0 = Operator0::kTransformA; /// Complex transform on B0 operand static ComplexTransform const kTransformB0 = Operator0::kTransformB; /// Complex transform on B1 operand static ComplexTransform const kTransformB1 = Operator1::kTransformB; /// Complex transform exports needed by higher-level kernels static ComplexTransform const kTransformA = kTransformA0; static ComplexTransform const kTransformB = kTransformB0; /// staticaly assert kStages for MmaPipelined is two (Double-buffered pipeline) static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2"); private: using WarpFragmentA0 = typename Operator0::FragmentA; using WarpFragmentB0 = typename Operator0::FragmentB; /// Warp Fragment of operand A1 loaded from accmulator tile using WarpFragmentA1 = typename FragmentIteratorA1::Fragment; /// Warp Fragment of operand A1 scale and bias loaded from threadblock fragment using WarpFragmentA1ScaleBias = typename FragmentIteratorA1ScaleBias::Fragment; using WarpFragmentB1 = typename Operator1::FragmentB; protected: /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA0 smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B0 operand to shared memory SmemIteratorB0 smem_iterator_B0_; /// Iterator to write threadblock-scoped tile of B1 operand to shared memory SmemIteratorB1 smem_iterator_B1_; public: /// Construct from tensor references CUTLASS_DEVICE B2bMmaPipelined( typename Base::B2bMmaSharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM int thread_idx, ///< ID within the threadblock int warp_idx, ///< ID of warp int lane_idx, ///< ID of each thread within a warp int problem_size_0_n ///< GEMM0 N is used for accumulator extent ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.shared_storage0.operand_A_ref(), 
thread_idx), smem_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), thread_idx), smem_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension //These should stay the same across different GEMM layers int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM; //These may change across different GEMM layers int tile_offset_k_0 = Base::kWarpGemmIterations0 * warp_idx_k; int tile_offset_k_1 = Base::kWarpGemmIterations1 * warp_idx_k; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A0_.add_tile_offset({warp_idx_m, tile_offset_k_0}); this->warp_tile_iterator_B0_.add_tile_offset({tile_offset_k_0, warp_idx_n}); this->warp_tile_iterator_B1_.add_tile_offset({tile_offset_k_1, warp_idx_n}); } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( int gemm_k_iterations_0, ///< number of iterations of the mainloop FragmentC1 &accum, ///< destination accumulator tile IteratorA0 iterator_A, ///< iterator over A operand in global memory IteratorB0 iterator_B0, ///< iterator over B0 operand in global memory IteratorAccumulatorScaleBias iterator_A1_scale, ///< iterator over A1 operand scale vectors in global memory IteratorAccumulatorScaleBias iterator_A1_bias, ///< iterator over A1 operand bias vectors in global memory IteratorB1 iterator_B1, ///< iterator over B1 operand in global memory FragmentC0 const &src_accum, ///< source accumualtor tile OutputOp output_op_0, ///< epilogue operation after 1st Gemm TransformA0 transform_A0 = TransformA0(), ///< transformation applied to A0 fragment TransformB0 transform_B0 = TransformB0(), ///< transformation applied to B0 fragment TransformB1 transform_B1 = TransformB1()) { ///< transformation applied to B1 fragment // // Prologue // // Perform accumulation in the 'd' output operand FragmentC0 accum0 = src_accum; FragmentA0 tb_frag_A; FragmentB0 tb_frag_B0; tb_frag_A.clear(); tb_frag_B0.clear(); // The last kblock is loaded in the prolog iterator_A.load(tb_frag_A); iterator_B0.load(tb_frag_B0); ++iterator_A; ++iterator_B0; this->smem_iterator_A_.store(transform_A0(tb_frag_A)); this->smem_iterator_B0_.store(transform_B0(tb_frag_B0)); ++this->smem_iterator_A_; ++this->smem_iterator_B0_; __syncthreads(); // Pair of fragments used to overlap shared memory loads and math instructions WarpFragmentA0 warp_frag_A0[2]; WarpFragmentB0 warp_frag_B0[2]; this->warp_tile_iterator_A0_.set_kgroup_index(0); this->warp_tile_iterator_B0_.set_kgroup_index(0); this->warp_tile_iterator_A0_.load(warp_frag_A0[0]); this->warp_tile_iterator_B0_.load(warp_frag_B0[0]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; Operator0 warp_mma0; int smem_write_stage_idx = 1; // Avoid reading out of bounds iterator_A.clear_mask(gemm_k_iterations_0 <= 1); iterator_B0.clear_mask(gemm_k_iterations_0 <= 1); // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing // shared memory loads (which have the tightest latency requirement). 
// // Mainloop // // Note: The main loop does not support Base::kWarpGemmIterations == 2. CUTLASS_GEMM_LOOP for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) { // // Loop over GEMM K dimension // CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group // as the case may be. if (warp_mma_k == Base::kWarpGemmIterations0 - 1) { // Write fragments to shared memory this->smem_iterator_A_.store(transform_A0(tb_frag_A)); this->smem_iterator_B0_.store(transform_B0(tb_frag_B0)); __syncthreads(); ++this->smem_iterator_A_; ++this->smem_iterator_B0_; // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory if (smem_write_stage_idx == 1) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0}); } else { this->warp_tile_iterator_A0_.add_tile_offset( {0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0}); this->warp_tile_iterator_B0_.add_tile_offset( {-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0}); } smem_write_stage_idx ^= 1; } this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; if (warp_mma_k == 0) { iterator_A.load(tb_frag_A); iterator_B0.load(tb_frag_B0); ++iterator_A; ++iterator_B0; // Avoid reading out of bounds if this was the last loop iteration iterator_A.clear_mask(gemm_k_iterations_0 <= 2); iterator_B0.clear_mask(gemm_k_iterations_0 <= 2); } warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2], warp_frag_B0[warp_mma_k % 2], accum0); } } //2nd Gemm /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile FragmentIteratorA1 warp_tile_iterator_A1_(accum0); // // Prologue // FragmentA1ScaleBias tb_frag_A1_scale; FragmentA1ScaleBias tb_frag_A1_bias; FragmentIteratorA1ScaleBias warp_tile_iterator_A1_scale_(tb_frag_A1_scale); FragmentIteratorA1ScaleBias warp_tile_iterator_A1_bias_(tb_frag_A1_bias); FragmentB1 tb_frag_B1; if(PerChannelScale) tb_frag_A1_scale.clear(); tb_frag_A1_bias.clear(); tb_frag_B1.clear(); // The last kblock is loaded in the prolog if(PerChannelScale) iterator_A1_scale.load(tb_frag_A1_scale); iterator_A1_bias.load(tb_frag_A1_bias); iterator_B1.load(tb_frag_B1); if(PerChannelScale) ++iterator_A1_scale; ++iterator_A1_bias; ++iterator_B1; this->smem_iterator_B1_.store(transform_B1(tb_frag_B1)); ++this->smem_iterator_B1_; __syncthreads(); // Pair of fragments used to overlap shared memory loads and math instructions WarpFragmentA1ScaleBias warp_frag_A1_scale[2]; WarpFragmentA1ScaleBias warp_frag_A1_bias[2]; WarpFragmentA1 warp_frag_A1[2]; WarpFragmentB1 warp_frag_B1[2]; this->warp_tile_iterator_B1_.set_kgroup_index(0); if(PerChannelScale) warp_tile_iterator_A1_scale_.load(warp_frag_A1_scale[0]); warp_tile_iterator_A1_bias_.load(warp_frag_A1_bias[0]); warp_tile_iterator_A1_.load(warp_frag_A1[0], warp_frag_A1_scale[0], warp_frag_A1_bias[0], output_op_0); this->warp_tile_iterator_B1_.load(warp_frag_B1[0]); ++warp_tile_iterator_A1_; if(PerChannelScale) ++warp_tile_iterator_A1_scale_; ++warp_tile_iterator_A1_bias_; 
++this->warp_tile_iterator_B1_; Operator1 warp_mma1; smem_write_stage_idx = 1; int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1; // Avoid reading out of bounds iterator_B1.clear_mask(gemm_k_iterations_1 <= 1); // // Mainloop // // Note: The main loop does not support Base::WarpGemmIterations == 2. CUTLASS_PRAGMA_UNROLL for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) { // // Loop over GEMM K dimension // CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group // as the case may be. if (warp_mma_k == Base::kWarpGemmIterations1 - 1) { // Write fragments to shared memory this->smem_iterator_B1_.store(transform_B1(tb_frag_B1)); __syncthreads(); ++this->smem_iterator_B1_; // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory if (smem_write_stage_idx == 1) { this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0}); } else { this->warp_tile_iterator_B1_.add_tile_offset( {-Base::kStages * Policy1::kPartitionsK * Base::kWarpGemmIterations1, 0}); } smem_write_stage_idx ^= 1; if(PerChannelScale) { tb_frag_A1_scale.clear(); iterator_A1_scale.load(tb_frag_A1_scale); ++iterator_A1_scale; } tb_frag_A1_bias.clear(); iterator_A1_bias.load(tb_frag_A1_bias); ++iterator_A1_bias; } this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1); if(PerChannelScale) warp_tile_iterator_A1_scale_.load(warp_frag_A1_scale[(warp_mma_k + 1) % 2]); warp_tile_iterator_A1_bias_.load(warp_frag_A1_bias[(warp_mma_k + 1) % 2]); warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2], warp_frag_A1_scale[(warp_mma_k + 1) % 2], warp_frag_A1_bias[(warp_mma_k + 1) % 2], output_op_0); this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]); if(PerChannelScale) ++warp_tile_iterator_A1_scale_; ++warp_tile_iterator_A1_bias_; ++warp_tile_iterator_A1_; ++this->warp_tile_iterator_B1_; if (warp_mma_k == 0) { iterator_B1.load(tb_frag_B1); ++iterator_B1; // Avoid reading out of bounds if this was the last loop iteration iterator_B1.clear_mask(gemm_k_iterations_1 <= 2); } warp_mma1(accum, warp_frag_A1[warp_mma_k % 2], warp_frag_B1[warp_mma_k % 2], accum); } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined.h", "repo_id": "cutlass", "token_count": 8674 }
8
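B2bMmaPipelined above is restricted to kStages == 2: one shared-memory buffer is consumed by the warp-level math while the other is being filled, and smem_write_stage_idx flips between the two each K group. The short host program below is a scalar caricature of that double-buffering schedule, written only to make the overlap pattern concrete; the names are invented and nothing here is kernel code.

// Minimal sketch of a two-stage (double-buffered) pipeline: the load for tile k+1 is issued
// while the "math" consumes tile k. Plain host C++ for illustration only.
#include <cstdio>
#include <vector>

int main() {
  const int kIterations = 8;
  std::vector<int> global_data(kIterations);
  for (int i = 0; i < kIterations; ++i) global_data[i] = i + 1;

  int buffer[2];              // stand-in for the two shared-memory stages
  buffer[0] = global_data[0]; // prologue: the first tile is loaded before the mainloop
  int write_stage = 1;        // next load lands in the other buffer (cf. smem_write_stage_idx)
  int accum = 0;

  for (int k = 0; k < kIterations; ++k) {
    // Issue the next load into the buffer that is not being consumed this iteration.
    if (k + 1 < kIterations) {
      buffer[write_stage] = global_data[k + 1];
    }
    // "Math" consumes the buffer written in the previous iteration (or in the prologue).
    accum += buffer[write_stage ^ 1];
    // Toggle which buffer is written next, mirroring smem_write_stage_idx ^= 1.
    write_stage ^= 1;
  }

  std::printf("accum = %d (expected %d)\n", accum, kIterations * (kIterations + 1) / 2);
  return 0;
}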
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to fuse activation's per channel scale+bias+relu into the wgrad mainloop. Compared with original fprop kernel, this example has two more vectors, one for the scale and one for the bias. The length of the vectors are the same as the activation channel number. This kernels loads the vectors when the associated activation channels are loaded in the mainloop. Between reading the activations and scale/bias data from the shared memory and calling tensor core instructions, scale+bias+relu is computed in the register file. This example is customized for Ampere 16816 fp16 tensor core instruction. Changing to different data types or different tensor core instruction require source code changing. See include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h for more technical details. 
*/ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_wgrad_fusion.h" #include "cutlass/conv/device/implicit_gemm_convolution_fusion.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta) using ElementInputA = cutlass::half_t; // Data type of elements in input tensor using ElementInputB = cutlass::half_t; // Data type of elements in input tensor using ElementInputScaleBias = cutlass::half_t; // Data type of elements in input sclae and bias vectors using ElementOutput = float; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutInputScaleBias = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 5; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in linear combination using Conv2dWgradFusionKernel = typename cutlass::conv::kernel::DefaultConv2dWgradFusion< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementInputScaleBias, LayoutInputScaleBias, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm >::Kernel; using ImplicitGemmFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<Conv2dWgradFusionKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(true), measure_performance(false), iterations(20), save_workspace(false), alpha(1), beta(0), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. // int const kAlignment = 8; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Updates input and filter sizes void update( cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size, cutlass::MatrixCoord stride) { this->input_size = input_size; this->filter_size = filter_size; conv_stride = stride; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "26_ampere_wgrad_mainloop_fusion example\n\n" << " This example fuses scale+bias+relu of the activation into Ampere's\n" << " Tensor Core operators on F16 data types to compute\n" << " backward convolution on tensors of layout NHWC.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --n=<int> Input tensor extent N\n" << " --h=<int> Input tensor extent H\n" << " --w=<int> Input tensor extent W\n" << " --c=<int> Input tensor extent C\n" << " --k=<int> Filter extent K\n" << " --r=<int> Filter extent R\n" << " --s=<int> Filter extent S\n\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n" << "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord( input_size.n(), (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << options.conv_stride.row() << "," << options.conv_stride.column() << "," << runtime_ms << "," << gflops; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile_convolution(Options const &options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. 
// cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.output_size()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.input_size); cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_transformed_b(options.input_size); cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias> tensor_b_scale({1, options.input_size.c()}); cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias> tensor_b_bias({1, options.input_size.c()}); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.filter_size); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(3), ElementInputA(-4), 0); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(7), ElementInputB(-8), 0); // Fill scale vector for tensor B on host with uniform-distribution random // data cutlass::reference::host::TensorFillRandomUniform( tensor_b_scale.host_view(), 1, ElementInputA(3), ElementInputA(-4), 0); // Fill bias vector for tensor B on host with uniform-distribution random // data cutlass::reference::host::TensorFillRandomUniform( tensor_b_bias.host_view(), 1, ElementInputA(3), ElementInputA(-4), 0); // Fill tensor C on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(7), ElementOutput(-8), 0); // Fill tensor D on host with zeros cutlass::reference::host::TensorFill( tensor_d.host_view()); // Fill tensor D for reference on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_b_scale.sync_device(); tensor_b_bias.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // // Define arguments for CUTLASS Convolution // cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split K dimension into 1 partitions int split_k_slices = 1; // Construct Conv2dProblemSize with user defined output size cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices ); typename ImplicitGemmFusion::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_b_scale.device_ref(), tensor_b_bias.device_ref(), tensor_c.device_ref(), tensor_d.device_ref(), {options.alpha, options.beta}, }; // // Initialize CUTLASS Convolution // ImplicitGemmFusion implicit_gemm_fusion_op; size_t workspace_size = implicit_gemm_fusion_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm_fusion_op.can_implement(arguments); CUTLASS_CHECK(result.status); result.status = implicit_gemm_fusion_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_fusion_op(); CUTLASS_CHECK(result.status); // // Optional reference check // if (options.reference_check) { std::cout << "Verification on device...\n"; // Compute scale + bias + relu in host code for (int n = 0; 
n < options.input_size.n(); ++n) { for (int h = 0; h < options.input_size.h(); ++h) { for (int w = 0; w < options.input_size.w(); ++w) { for (int c = 0; c < options.input_size.c(); ++c) { tensor_transformed_b.at({n, h, w, c}) = std::max( ElementOutput(0), ElementOutput(tensor_b.at({n, h, w, c}) * tensor_b_scale.at({0, c}) + tensor_b_bias.at({0, c}))); } } } } tensor_transformed_b.sync_device(); // Compute with reference implementation cutlass::reference::device::Conv2dWgrad< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator, cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue> >( problem_size, tensor_a.device_ref(), tensor_transformed_b.device_ref(), tensor_c.device_ref(), tensor_ref_d.device_ref(), options.alpha, options.beta ); // Check if output from CUTLASS kernel and reference kernel are equal or not tensor_d.sync_host(); tensor_ref_d.sync_host(); bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "26_ampere_wgrad_mainloop_fusion_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm_fusion_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. 
result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major == 8 && props.minor == 0)) { std::cerr << "This test must run on SM80 A100.\n"; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {34, 408}; struct Benchmark { int h, w, c, k, r, s, stride_h, stride_w; } layers[] = { {56, 56, 64, 256, 1, 1, 1, 1}, {56, 56, 64, 64, 1, 1, 1, 1}, {56, 56, 64, 64, 3, 3, 1, 1}, {56, 56, 256, 64, 1, 1, 1, 1}, {56, 56, 256, 512, 1, 1, 2, 2}, {56, 56, 256, 128, 1, 1, 1, 1}, {56, 56, 128, 128, 3, 3, 2, 2}, {28, 28, 128, 512, 1, 1, 1, 1}, {28, 28, 512, 128, 1, 1, 1, 1}, {28, 28, 128, 128, 3, 3, 1, 1}, {28, 28, 512, 1024, 1, 1, 2, 2}, {28, 28, 512, 256, 1, 1, 1, 1}, {28, 28, 256, 256, 3, 3, 2, 2}, {14, 14, 256, 1024, 1, 1, 1, 1}, {14, 14, 1024, 256, 1, 1, 1, 1}, {14, 14, 256, 256, 3, 3, 1, 1}, {14, 14, 1024, 2048, 1, 1, 2, 2}, {14, 14, 1024, 512, 1, 1, 1, 1}, {14, 14, 512, 512, 3, 3, 2, 2}, { 7, 7, 512, 2048, 1, 1, 1, 1}, { 7, 7, 2048, 512, 1, 1, 1, 1}, { 7, 7, 512, 512, 3, 3, 1, 1}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const &layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}, {layer.stride_h, layer.stride_w}); Result result = profile_convolution(options); result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu/0
{ "file_path": "cutlass/examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu", "repo_id": "cutlass", "token_count": 9921 }
9
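The wgrad example above fuses a per-channel scale, bias, and ReLU of the activation into the mainloop, which is why its reference check first materializes the transformed activation and then runs an ordinary device wgrad. The naive loop nest below restates the same computation on the host for a tiny problem, assuming cross-correlation mode, unit dilation, and zero padding; the tensor layouts, names, and sizes are illustrative and this is not the implicit-GEMM code path used by the kernel.

// Naive host sketch (assumed semantics, illustration only): weight gradient with the activation
// transformed by per-channel scale + bias + ReLU as it is consumed.
#include <cstdio>
#include <vector>

int main() {
  // Tiny problem: NHWC activation, NPQK output gradient, KRSC weight gradient.
  const int N = 1, H = 4, W = 4, C = 2, K = 2, R = 3, S = 3;
  const int pad = 1, stride = 1, P = H, Q = W;   // output extents for this 3x3, pad-1, stride-1 case

  std::vector<float> x(N * H * W * C, 1.0f);     // activation
  std::vector<float> dy(N * P * Q * K, 0.5f);    // output gradient
  std::vector<float> scale(C, 2.0f), bias(C, -0.5f);
  std::vector<float> dw(K * R * S * C, 0.0f);    // weight gradient

  auto X  = [&](int n, int h, int w, int c) { return x[((n * H + h) * W + w) * C + c]; };
  auto dY = [&](int n, int p, int q, int k) { return dy[((n * P + p) * Q + q) * K + k]; };

  for (int k = 0; k < K; ++k)
    for (int r = 0; r < R; ++r)
      for (int s = 0; s < S; ++s)
        for (int c = 0; c < C; ++c) {
          float acc = 0.0f;
          for (int n = 0; n < N; ++n)
            for (int p = 0; p < P; ++p)
              for (int q = 0; q < Q; ++q) {
                int h = p * stride - pad + r;
                int w = q * stride - pad + s;
                if (h < 0 || h >= H || w < 0 || w >= W) continue;   // zero padding
                float a = X(n, h, w, c) * scale[c] + bias[c];       // fused scale + bias
                a = a > 0.0f ? a : 0.0f;                            // fused ReLU
                acc += dY(n, p, q, k) * a;
              }
          dw[((k * R + r) * S + s) * C + c] = acc;
        }

  std::printf("dw[0] = %f\n", dw[0]);
  return 0;
}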
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Contains additional metadata about layout permute functions used in the example. */ #include "cutlass/tensor_coord.h" #include "cutlass/layout/permute.h" /// Additional permutation metadata to facilitate testing/printing template<typename PermuteLayout> struct PermuteInfo; /// Specialization for default case (no permute). Other specializations must follow this template. template<> struct PermuteInfo<cutlass::layout::NoPermute> { /// Whether this is a BMM or GEMM permutation (NoPermute can actually be either) static bool constexpr kBatched = false; /// Minimal divisor for row extent static int constexpr kRowFactor = 1; /// Minimum divisor for column extent static int constexpr kColumnFactor = 1; /// Minimum divisor for batch size dimension static int constexpr kBatchFactor = 1; /// Tensor layout used in permutation operation using Layout = cutlass::layout::PackedVectorLayout; static std::string name() { return "NoPermute"; } /// User-friendly description of the permute operation static std::string desc() { return "no permutation"; } /// Infer original higher-rank tensor shape from GEMM/BMM matrix extents. /// For direct (output) permutations, must be a simple reshape of extent. /// For inverse (input) permutations, must return shape *before* permute operation. /// In case of NoPermute, simply use a linear (rank 1) view of the memory static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { return Layout::TensorCoord(extent.row() * extent.column() * batch_count); } /// Compute the permuted higher-rank tensor shape from the original shape. 
static Layout::TensorCoord permute(Layout::TensorCoord const &s) { return s; } }; template<int D1> struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>> { static bool constexpr kBatched = true; static int constexpr kRowFactor = 1; static int constexpr kColumnFactor = 1; static int constexpr kBatchFactor = D1; using Layout = cutlass::layout::TensorNHWC; static std::string name() { return "Tensor4DPermuteBMM0213<" + std::to_string(D1) + ">"; } static std::string desc() { return "batched GEMM permutation [0, 2, 1, 3]"; } static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int D0 = batch_count / D1; int D2 = extent.row(); int D3 = extent.column(); return {D0, D1, D2, D3}; } static Layout::TensorCoord permute(Layout::TensorCoord const &s) { return {s[0], s[2], s[1], s[3]}; } }; template<int D1> struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajorInverse<D1>> : public PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>> { static bool constexpr kBatched = true; static int constexpr kRowFactor = 1; static int constexpr kColumnFactor = D1; static int constexpr kBatchFactor = 1; using Base = PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>>; using Layout = typename Base::Layout; static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int D0 = batch_count; int D2 = extent.row(); int D3 = extent.column() / D1; return {D0, D1, D2, D3}; } }; template<int D1> struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>> { static bool constexpr kBatched = true; static int constexpr kRowFactor = 1; static int constexpr kColumnFactor = 1; static int constexpr kBatchFactor = D1; using Layout = cutlass::layout::TensorNHCW; static std::string name() { return "Tensor4DPermuteBMM0321<" + std::to_string(D1) + ">"; } static std::string desc() { return "batched GEMM permutation [0, 3, 2, 1]"; } static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int D0 = batch_count / D1; int D2 = extent.row(); int D3 = extent.column(); return {D0, D1, D2, D3}; } static Layout::TensorCoord permute(Layout::TensorCoord const &s) { return {s[0], s[3], s[2], s[1]}; } }; template<int D1> struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajorInverse<D1>> : public PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>> { static bool constexpr kBatched = true; static int constexpr kRowFactor = D1; static int constexpr kColumnFactor = 1; static int constexpr kBatchFactor = 1; using Base = PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>>; using Layout = typename Base::Layout; static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int D0 = batch_count; int D2 = extent.row() / D1; int D3 = extent.column(); return {D0, D1, D2, D3}; } }; template<int D1, int D2> struct PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> { static bool constexpr kBatched = false; static int constexpr kRowFactor = D1; static int constexpr kColumnFactor = D2; static int constexpr kBatchFactor = 1; using Layout = cutlass::layout::TensorNHWC; static std::string name() { return "Tensor4DPermute0213<" + std::to_string(D1) + "," + std::to_string(D2) + ">"; } static std::string desc() { return "normal GEMM permutation [0, 2, 1, 3]"; } static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int D0 = extent.row() / D1; int D3 = extent.column() / D2; 
return {D0, D1, D2, D3}; } static Layout::TensorCoord permute(Layout::TensorCoord const &s) { return {s[0], s[2], s[1], s[3]}; } }; template<int D1, int D2> struct PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>> : public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> { static bool constexpr kBatched = false; static int constexpr kRowFactor = D2; static int constexpr kColumnFactor = D1; static int constexpr kBatchFactor = 1; using Base = PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>>; using Layout = typename Base::Layout; static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int D0 = extent.row() / D2; int D3 = extent.column() / D1; return {D0, D1, D2, D3}; } }; template<int D1, int D2> struct PermuteInfo<cutlass::layout::Tensor4DPermute0213ColumnMajor<D1, D2>> : public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> { using Layout = cutlass::layout::TensorCWHN; }; template<int D1, int D2> struct PermuteInfo<cutlass::layout::Tensor4DPermute0213ColumnMajorInverse<D1, D2>> : public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>> { using Layout = cutlass::layout::TensorCWHN; }; template<int T1, int T2, int T3> struct PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>> { static bool constexpr kBatched = false; static int constexpr kRowFactor = T1; static int constexpr kColumnFactor = T2 * T3; static int constexpr kBatchFactor = 1; using Layout = cutlass::layout::TensorNDHWC; static std::string name() { return "Tensor5DPermute20314<" + std::to_string(T1) + "," + std::to_string(T2) + "," + std::to_string(T3) + ">"; } static std::string desc() { return "normal GEMM permutation [2, 0, 3, 1, 4]"; } static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int const T0 = extent.row() / T1; int const T4 = extent.column() / (T2 * T3); return {T0, T1, T2, T3, T4}; } static Layout::TensorCoord permute(Layout::TensorCoord const &s) { return {s[2], s[0], s[3], s[1], s[4]}; } }; template<int T1, int T2, int T3> struct PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajorInverse<T1, T2, T3>> : public PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>> { static bool constexpr kBatched = false; static int constexpr kRowFactor = T2; static int constexpr kColumnFactor = T1 * T3; static int constexpr kBatchFactor = 1; using Base = PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>>; using Layout = typename Base::Layout; static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int const T0 = extent.row() / T2; int const T4 = extent.column() / (T1 * T3); return {T0, T1, T2, T3, T4}; } }; template<int T1, int T2, int T3> struct PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>> { static bool constexpr kBatched = false; static int constexpr kRowFactor = T1; static int constexpr kColumnFactor = T2 * T3; static int constexpr kBatchFactor = 1; using Layout = cutlass::layout::TensorCWHDN; static std::string name() { return "Tensor5DPermute02413<" + std::to_string(T1) + "," + std::to_string(T2) + "," + std::to_string(T3) + ">"; } static std::string desc() { return "normal GEMM permutation [0, 2, 4, 1, 3]"; } using Coord = cutlass::Tensor5DCoord; static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int const T0 = extent.row() / T1; int const T4 = extent.column() / (T2 * T3); return 
{T0, T1, T2, T3, T4}; } static Layout::TensorCoord permute(Layout::TensorCoord const &s) { return {s[0], s[2], s[4], s[1], s[3]}; } }; template<int T1, int T2, int T3> struct PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajorInverse<T1, T2, T3>> : public PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>> { static bool constexpr kBatched = false; static int constexpr kRowFactor = T2; static int constexpr kColumnFactor = T1 * T3; static int constexpr kBatchFactor = 1; using Base = PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>>; using Layout = typename Base::Layout; static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) { int const T0 = extent.row() / T2; int const T4 = extent.column() / (T1 * T3); return {T0, T1, T2, T3, T4}; } };
cutlass/examples/39_gemm_permute/permute_info.h/0
{ "file_path": "cutlass/examples/39_gemm_permute/permute_info.h", "repo_id": "cutlass", "token_count": 4266 }
10
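Each PermuteInfo specialization above pairs original_shape(), which reshapes a GEMM extent into a higher-rank tensor, with permute(), which reorders that tensor's dimensions. The standalone program below walks one case, Tensor4DPermute0213 on a row-major matrix, and tracks where a single (row, column) element lands; it assumes a row-major flattening of both 4-D views, does not depend on CUTLASS headers, and all concrete sizes are made up for illustration.

// Standalone sketch of the [0, 2, 1, 3] permutation: an M x N matrix is viewed as
// [D0, D1, D2, D3] with D0 = M / D1 and D3 = N / D2, then dimensions 1 and 2 are exchanged.
#include <array>
#include <cstdio>

int main() {
  const int M = 8, N = 6, D1 = 2, D2 = 3;
  const int D0 = M / D1, D3 = N / D2;

  std::array<int, 4> shape    = {D0, D1, D2, D3};                          // original_shape(extent, 1)
  std::array<int, 4> permuted = {shape[0], shape[2], shape[1], shape[3]};  // permute pattern [0, 2, 1, 3]

  // Decompose one (row, col) coordinate of the matrix into the 4-D view, apply the permutation,
  // and compute its row-major offset in the permuted tensor.
  const int row = 5, col = 4;
  std::array<int, 4> c = {row / D1, row % D1, col / D3, col % D3};  // original 4-D coordinate
  std::array<int, 4> p = {c[0], c[2], c[1], c[3]};                  // coordinate after [0, 2, 1, 3]

  long offset = ((long(p[0]) * permuted[1] + p[1]) * permuted[2] + p[2]) * permuted[3] + p[3];

  std::printf("original shape [%d,%d,%d,%d] -> permuted shape [%d,%d,%d,%d]\n",
              shape[0], shape[1], shape[2], shape[3],
              permuted[0], permuted[1], permuted[2], permuted[3]);
  std::printf("element (%d,%d) lands at linear offset %ld after the permute\n", row, col, offset);
  return 0;
}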
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import argparse import torch import sys import os from piped_subprocess import PipedSubprocess, TORCH_DTYPE_NAME import math parser = argparse.ArgumentParser() parser.add_argument("example_exe", type=str, help="Path to the 41_fused_multi_head_attention_backward executable") args = parser.parse_args() torch.manual_seed(0) dtype = torch.float16 B, Mq, Mkv, H, K, Kv = 2, 1024, 1024, 5, 128, 128 causal = True repeat_count = 100 ATOL = { torch.float: 5e-4, torch.half: 9.5e-2, torch.bfloat16: 7e-1, }[dtype] RTOL = { torch.float: 1e-4, torch.half: 2e-2, torch.bfloat16: 1e-1, }[dtype] assert not (causal and Mq < Mkv), "causal only supports seqlenK <= seqlenQ" fmha_bw_binary = args.example_exe if not os.path.isfile(fmha_bw_binary): print(f"""No such file: `{fmha_bw_binary}`\nDid you forget to run "make 41_fused_multi_head_attention"?""") sys.exit(1) def create_lower_triangular_mask(): return torch.triu(torch.full( # type: ignore [1, Mq, Mkv], dtype=dtype, fill_value=float("-inf"), ), diagonal=1) def ref_mha_bmk(q, k, v, mask): # Multi-head attention with inputs/outputs in BMK format q = q.float() k = k.float() v = v.float() q = q * (1 / q.shape[-1] ** 0.5) attn = q @ k.transpose(-2, -1) if mask is not None: attn += mask attn_max = attn.max(-1, True).values attn_norm = (attn - attn_max).exp().sum(-1, True) attn = attn.softmax(-1) lse = attn_max + attn_norm.log() lse = lse.squeeze(2) return attn @ v, lse def bmhk2bmk(t): return t.permute((0, 2, 1, 3)).reshape( [t.shape[0] * t.shape[2], t.shape[1], t.shape[3]] ) def ref_mha_bmhk(q, k, v, mask): # Multi-head attention with inputs/outputs in BMHK format assert q.ndim == 4 out, lse = ref_mha_bmk(bmhk2bmk(q), bmhk2bmk(k), bmhk2bmk(v), mask=mask) out = out.reshape([q.shape[0], q.shape[2], 
q.shape[1], v.shape[3]]) return out.permute((0, 2, 1, 3)), lse.reshape([q.shape[0], q.shape[2], q.shape[1]]) def ref_mha_bw_bmhk(q, k, v, mask, lse, out, grad_out, delta): lse = lse[:, :, :q.shape[1]] #BMH, unpad Q dimension delta = delta.reshape([-1, delta.shape[-1], 1]) # bmhk -> bmk q, k, v, out, grad_out = [bmhk2bmk(x).float() for x in (q, k, v, out, grad_out)] attn_T = k @ q.transpose(-2, -1) if mask is not None: attn_T += mask.transpose(-2, -1) attn_T = attn_T * (1 / q.shape[-1] ** 0.5) attn_T = attn_T - lse.reshape([-1, 1, lse.shape[-1]]) attn_T = attn_T.exp() grad_v = attn_T @ grad_out dov = grad_out @ v.transpose(-2, -1) tmp = (dov - delta) * attn_T.transpose(-2, -1) tmp = tmp / (q.shape[-1] ** 0.5) grad_q = tmp @ k grad_k = tmp.transpose(-2, -1) @ q return [x.reshape([B, H, x.shape[1], x.shape[-1]]).permute([0, 2, 1, 3]) for x in [grad_q, grad_k, grad_v]] print("initializing tensors...") query = torch.randn([B, Mq, H, K], dtype=dtype) key = 3 * torch.randn([B, Mkv, H, K], dtype=dtype) value = 3 * torch.randn([B, Mkv, H, Kv], dtype=dtype) mask = create_lower_triangular_mask() if causal else None # let PyTorch compute gradients query.requires_grad_(True) key.requires_grad_(True) value.requires_grad_(True) print("computing fw...") out, lse = ref_mha_bmhk(query, key, value, mask=mask) out = out.to(dtype).contiguous() grad_out = 3 * torch.randn([B, Mq, H, Kv], dtype=dtype) print("computing bw with autograd...") out.backward(grad_out) scale = (1 / query.shape[-1] ** 0.5) # Additional data needed by the kernel delta = (grad_out.float() * out.float()).sum(-1).transpose(-2, -1).contiguous() pad_amount = (32 - (lse.shape[2] % 32)) % 32 lse = torch.nn.functional.pad(lse, [0, pad_amount], value=math.inf) print("computing bw with reference implem...") gQr, gKr, gVr = ref_mha_bw_bmhk(query, key, value, mask, lse, out, grad_out, delta) with PipedSubprocess(fmha_bw_binary) as bw_kernel: # Send kernel arguments bw_kernel.write( TORCH_DTYPE_NAME[query.dtype], "scale", scale, "head_dim", K, "head_dim_value", Kv, "num_queries", Mq, "num_keys", Mkv, "num_heads", H, "custom_mask_type", (1 if causal else 0), "num_batches", B, "repeat_count", repeat_count, "num_splits_key", (Mkv // 128), ) bw_kernel.writeTensor(query, "query", ["q_strideB", "q_strideM", "q_strideH"]) bw_kernel.writeTensor(key, "key", ["k_strideB", "k_strideM", "k_strideH"]) bw_kernel.writeTensor(value, "value", ["v_strideB", "v_strideM", "v_strideH"]) bw_kernel.writeTensor(lse, "logsumexp", ["lse_strideB", "lse_strideH"]) bw_kernel.writeTensor(out, "output", ["o_strideB", "o_strideM", "o_strideH"]) bw_kernel.writeTensor(grad_out, "grad_output", ["gO_strideB", "gO_strideM", "gO_strideH"]) bw_kernel.writeTensor(delta, "delta", ["delta_strideB", "delta_strideH"]) if bw_kernel.read() != "OK": print("Got unexpected output") print(bw_kernel.subp.communicate()[0]) sys.exit(0) # Read kernel output gQ = bw_kernel.readTensor("grad_query", ["gQ_strideB", "gQ_strideM", "gQ_strideH"], query.shape).float() gK = bw_kernel.readTensor("grad_key", ["gK_strideB", "gK_strideM", "gK_strideH"], key.shape).float() gV = bw_kernel.readTensor("grad_value", ["gV_strideB", "gV_strideM", "gV_strideH"], value.shape).float() runtime_ms = float(bw_kernel.readNamed("runtime_ms")) float_ops = B * H * sum([ # att = Q @ K.transpose Mq * Mkv * K * 2, # att @ dO Mkv * Mq * Kv * 2, # dov = dO @ V Mq * Kv * Mkv * 2, # dov @ K Mq * K * Mkv * 2, # dov @ Q Mq * K * Mkv * 2, ]) if causal: float_ops //= 2 print(f""" Fused multi-head attention - backward batch_size={B} 
num_queries={Mq} num_keys={Mkv} num_heads={H} head_dim={K} head_dim_value={Kv} Correctness: grad_query: {"PASS" if torch.allclose(gQ, gQr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gQ - gQr).abs().max()}) grad_key: {"PASS" if torch.allclose(gK, gKr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gK - gKr).abs().max()}) grad_value: {"PASS" if torch.allclose(gV, gVr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gV - gVr).abs().max()}) (atol={ATOL} / rtol={RTOL}) Runtime: {runtime_ms}ms ({(float_ops / (1024 ** 4)) / (runtime_ms / 1000):.4f} TFlops) """) assert torch.allclose(query.grad.float(), gQr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!" assert torch.allclose(key.grad.float(), gKr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!" assert torch.allclose(value.grad.float(), gVr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
cutlass/examples/41_fused_multi_head_attention/fmha_backward_test.py/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/fmha_backward_test.py", "repo_id": "cutlass", "token_count": 3659 }
11
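Note on the row above (fmha_backward_test.py): the `delta` tensor it feeds to the kernel is the per-(batch, head, query) dot product of the output and its gradient, computed as `(grad_out * out).sum(-1).transpose(-2, -1)`. A minimal PyTorch sketch of that same quantity is below; the tensor sizes are illustrative assumptions, and the einsum is only an alternative spelling used as a cross-check, not part of the kernel's interface.

import torch

# Illustrative sizes only (assumed for this sketch), laid out BMHK like the test's tensors.
B, M, H, Kv = 2, 16, 4, 8
out = torch.randn(B, M, H, Kv)
grad_out = torch.randn(B, M, H, Kv)

# As in the test: row-wise dot product of O and dO, transposed to a [B, H, M] layout.
delta = (grad_out.float() * out.float()).sum(-1).transpose(-2, -1).contiguous()

# Equivalent einsum spelling; both produce the same [B, H, M] tensor.
delta_einsum = torch.einsum("bmhk,bmhk->bhm", grad_out.float(), out.float())
assert torch.allclose(delta, delta_einsum, atol=1e-5)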
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/pitch_linear.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename Element_, int ElementsPerAccess > struct DefaultThreadMapTensorOpForFusedBias { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using Element = Element_; static int const kElementsPerAccess = ElementsPerAccess; // // Definitions // struct Detail { /// Tensor Operations fundamentally perform operations on 8 rows static int const kTensorOpRows = 8; static int const kWarpSize = 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kM % WarpShape::kM), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMapBiasAct < OutputTileShape<ThreadblockShape::kN, Detail::kTensorOpRows, Detail::WarpCount::kM, 1, 1>, OutputTileShape<1, WarpShape::kM / Detail::kTensorOpRows, 1, 1, WarpShape::kM / Detail::kTensorOpRows>, Detail::kThreads, kElementsPerAccess, sizeof_bits<Element>::value >; }; /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/default_thread_map_tensor_op_for_fused_bias.h/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/default_thread_map_tensor_op_for_fused_bias.h", "repo_id": "cutlass", "token_count": 1126 }
12
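Note on the row above (default_thread_map_tensor_op_for_fused_bias.h): the `Detail` struct is pure compile-time arithmetic, so its values are easy to check by hand. A small Python sketch of that arithmetic follows; the 128x128 threadblock tile and 64x64 warp tile are assumed, illustrative shapes (the header itself prescribes none), chosen only to make the numbers concrete.

# Assumed example tile shapes; not taken from the header.
tb_m, tb_n = 128, 128          # ThreadblockShape::kM, ::kN
warp_m, warp_n = 64, 64        # WarpShape::kM, ::kN
partitions_k = 1               # kPartitionsK
warp_size = 32                 # Detail::kWarpSize
tensor_op_rows = 8             # Detail::kTensorOpRows

# Detail::WarpCount = ThreadblockShape / WarpShape (with kPartitionsK in the K slot).
warp_count = (tb_m // warp_m, tb_n // warp_n, partitions_k)
# Detail::kThreads = WarpCount::kCount * kWarpSize
threads = warp_count[0] * warp_count[1] * warp_count[2] * warp_size

print(warp_count, threads)            # (2, 2, 1) 128
print(warp_m // tensor_op_rows)       # 8, the WarpShape::kM / kTensorOpRows term in the OutputTileShape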
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# def type_2_cutlass_type(input_type = "fp16"): # float point type if input_type == "fp32": return "float" if input_type == "bf16": return "cutlass::bfloat16_t" if input_type == "fp16": return "cutlass::half_t" # integer type if(input_type == "int32"): return "int32_t" if(input_type == "int8"): return "int8_t" if input_type == 'Row': return 'cutlass::layout::RowMajor' if input_type == 'Col': return 'cutlass::layout::ColumnMajor' def cvt_2_cutlass_shape(gemm_shape): # gemm shape if len(gemm_shape) == 3: val = "cutlass::gemm::GemmShape<" \ + str(gemm_shape[0]) + ", " \ + str(gemm_shape[1]) + ", " \ + str(gemm_shape[2]) + ">" return val def write_2_headfile(filename, file_dir, string): with open(file_dir + filename, 'w') as f: f.write("/* Auto Generated code - Do not edit.*/\n\n\n#pragma once\n" + string) def var_idx(varaiable, index): return varaiable + str(index) def list_2_string(input_list, ): rtn_string = "" cnt = 0 for element in input_list: final = ", \n" if cnt == len(input_list) - 1: final = "\n" cnt += 1 rtn_string += str(element) + final return rtn_string def get_epilogue_info(layer_info): return layer_info['epilogue'] def get_epilogue_tp(layer_info): epilogue_info = get_epilogue_info(layer_info) return epilogue_info['tp'] def get_epilogue_add_bias_or_not(layer_info): epilogue_info = get_epilogue_info(layer_info) return epilogue_info['bias']['addbias'] def get_epilogue_add_bias_tp(layer_info): epilogue_info = get_epilogue_info(layer_info) return epilogue_info['bias']['bias_tp'] def get_epilogue_args(layer_info): epilogue_info = get_epilogue_info(layer_info) return epilogue_info['args'] def get_epilogue_bias_shape(layer_info): bias_tp = get_epilogue_add_bias_tp(layer_info).lower() mn_shape = layer_info['mnk'][:-1] if 
bias_tp == 'mat': mn_shape[0] = 'M' return mn_shape elif bias_tp == 'vec': mn_shape[0] = 1 return mn_shape else: assert(0) def get_epilogue_bias_ldm(layer_info): bias_tp = get_epilogue_add_bias_tp(layer_info).lower() mn_shape = layer_info['mnk'][:-1] c_layout = layer_info['C_format'].lower() if c_layout != 'row': assert(0) if bias_tp == 'mat': return mn_shape[1] elif bias_tp == 'vec': return 0 else: assert(0) def get_epilogue_compute_tp(layer_info): return layer_info['Acc_tp']
cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/helper.py/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/helper.py", "repo_id": "cutlass", "token_count": 1814 }
13
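Note on the row above (ir_gen/helper.py): the helpers are plain string builders, so their behavior is clearest from a few calls. The snippet below simply exercises functions defined in that file; the bare `from helper import ...` assumes the ir_gen directory is on the Python path, which depends on how the code-generation scripts are invoked.

# Assumes examples/44_multi_gemm_ir_and_codegen/ir_gen is on sys.path.
from helper import type_2_cutlass_type, cvt_2_cutlass_shape, var_idx

print(type_2_cutlass_type("fp16"))          # cutlass::half_t
print(type_2_cutlass_type("Row"))           # cutlass::layout::RowMajor
print(cvt_2_cutlass_shape([128, 128, 32]))  # cutlass::gemm::GemmShape<128, 128, 32>
print(var_idx("tensor_", 3))                # tensor_3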
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run depthwise 2d convolution kernels using functions and data structures provided by CUTLASS using SIMT instruction; There are 3 types of implementations of depthwise 2d convoltion 1. kAnalytic Implicit gemm 2d convoltion algorithm. 2. kOptimized An optimized algorithm and supports arbitrary stride and dilation. 3. kFixedStrideDilation An optimized algorithm with fixed stride and dilation to reduce the runtime computation and do more optimizations. In general, the perf of kFixedStrideDilation would be better than kOptimized. However, if the filter size, stride or dilation is large, it would encounter register spilling and may hurt the perf. If in this case, please use kOptimized. For kOptimized and kFixedStrideDilation, in order to fully utilize GPU hardware resources and achieve better perf, when the output tensor size is large, splitk should be enabled to achieve better perf. In this example, it demonstrates how to construct and run a FixedStrideDilation depthwise 2d convolution kernel. 
*/ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_depthwise_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/conv/device/direct_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = cutlass::half_t; // Data type of accumulator using ElementComputeEpilogue = cutlass::half_t; // Data type of epilogue computation (alpha, beta) using ElementInputA = cutlass::half_t; // Data type of elements in input tensor using ElementInputB = cutlass::half_t; // Data type of elements in input tensor using ElementOutput = cutlass::half_t; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm60; // This code section describes the groups a thread block will compute constexpr int groups_per_cta = 64; // This code section describes the output tile <N, O, P, Q> a thread block will compute using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>; // This code section describes the filter shape <R, S> using FilterShape = cutlass::MatrixShape<3, 3>; // Threadblock tile shape using ThreadblockShape = cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>; // This code section describes tile size a warp will computes // WarpShape::kM = P * Q the warps would process // WarpShape::kN = groups_per_cta that the warps would process // WarpShape::kK = filter_size that the warps would process using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>; // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< 1, ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>; // Number of pipelines you want to use constexpr int NumStages = 4; // This code section describe iterator algorithm selected is kFixedStrideDilation static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kFixedStrideDilation; using StrideShape = cutlass::MatrixShape<1, 1>; using DilationShape = cutlass::MatrixShape<1, 1>; constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 
kEpilogueElementsPerAccess, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha/beta in linear combination cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling>; // Epilogue scaling operation. using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm, cutlass::conv::StrideSupport::kFixed, StrideShape, DilationShape>::Kernel; using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; int groups; int splitk; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; std::string tag; Options() : help(false), input_size(1, 128, 128, 32), filter_size(32, 3, 3, 1), groups(32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(false), measure_performance(true), iterations(20), save_workspace(false), alpha(1), beta(0), splitk(1) {} // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. // int const kAlignment = 8; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // depthwise conv if (groups != input_size.c()) { return false; } if (filter_size.n() != groups) { return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } // Filter size passed through command line does not match filter size template parameter if (filter_size.h() != FilterShape::kRow || filter_size.w() != FilterShape::kColumn) { std::cerr << "Filter size passed in (" << filter_size.h() << "x" << filter_size.w() << ") " << "must match the FilterShape template parameter of the convolution " << "(" << FilterShape::kRow << "x" << FilterShape::kColumn << "). " << "To use the filter shape passed in, change the FilterShape template " << "parameter and recompile this example." 
<< std::endl; return false; } return true; } /// Updates input and filter sizes void update(cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) { this->input_size = input_size; this->filter_size = filter_size; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); cmd.get_cmd_line_argument("g", groups); filter_size.c() = 1; filter_size.n() = input_size.c(); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("splitk", splitk); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); int32_t padding_h = filter_size.h() / 2; int32_t padding_w = filter_size.w() / 2; padding = {padding_h, padding_h, padding_w, padding_w}; } /// Prints the usage statement. std::ostream &print_usage(std::ostream &out) const { out << "46_depthwise_gemm_fprop example\n\n" << " This example uses Ampere's Tensor Core operators on F16 data types to compute\n" << " forward convolution on tensors of layout NHWC.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --n=<int> Input tensor extent N\n" << " --h=<int> Input tensor extent H\n" << " --w=<int> Input tensor extent W\n" << " --c=<int> Input tensor extent C\n" << " --k=<int> Filter extent K\n" << " --r=<int> Filter extent R\n" << " --s=<int> Filter extent S\n\n" << " --g=<int> Groups\n\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --splitk=<int> Enable splitK\n\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results " "table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/46_depthwise_simt_conv2dfprop/46_depthwise_simt_conv2dfprop --n=32 " "--h=224 --w=224 --c=128 --k=128 --g=128 --r=3 --s=3\n\n" << "$ ./examples/46_depthwise_simt_conv2dfprop/46_depthwise_simt_conv2dfprop --n=1 " "--h=224 --w=224 --c=32 --k=32 --g=32 --r=3 --s=3 --splitk=10 --ref-check\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord( input_size.n(), (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * 
filter_size.w() * filter_size.c()); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result() : runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) {} static std::ostream &print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,G,stride_h,stride_w,dilation_h,dilation_w,splitK,Runtime,GFLOPs"; return out; } std::ostream &print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } cutlass::Tensor4DCoord output_size = options.output_size(); out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << options.groups << "," << options.conv_stride.row() << "," << options.conv_stride.column() << "," << options.dilation.row() << "," << options.dilation.column() << "," << options.splitk << "," << runtime_ms << "," << gflops; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one testcase Result profile_convolution(Options const &options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. // cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b_transpose(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size()); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size()); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size()); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(5), ElementInputA(-6), 0); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(3), ElementInputB(-6), 0); // Fill tensor C on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(5), ElementOutput(-6), 0); // Fill tensor D on host with zeros cutlass::reference::host::TensorFill(tensor_d.host_view()); // Fill tensor D for reference on host with zeros cutlass::reference::host::TensorFill(tensor_ref_d.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_b_transpose.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // // Define arguments for CUTLASS Convolution // cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split P*Q into multiple CTA int split_k_slices = options.splitk; // Construct Conv2dProblemSize with user defined output size cutlass::conv::Conv2dProblemSize problem_size(options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), 
mode, split_k_slices, options.groups); // Construct Direc2dConv::Argument structure with conv2d // problem size, data pointers, and epilogue values typename Direct2dConv::Arguments arguments{problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_d.device_ref(), {options.alpha, options.beta}, tensor_b_transpose.device_ref()}; // // Initialize CUTLASS Convolution // Direct2dConv implicit_gemm_op; size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm_op.can_implement(arguments); CUTLASS_CHECK(result.status); result.status = implicit_gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); // // Optional reference check // if (options.reference_check) { std::cout << "Verification on host...\n"; // Compute with reference implementation cutlass::reference::host::Conv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator >(problem_size, tensor_a.host_ref(), tensor_b.host_ref(), tensor_c.host_ref(), tensor_ref_d.host_ref(), options.alpha, options.beta); // Check if output from CUTLASS kernel and reference kernel are equal or not tensor_d.sync_host(); bool passed = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "46_depthwise_simt_conv2dfprop" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto &event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. 
result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { bool notSupported = false; cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major >= 6)) { std::cerr << "Run on a machine with compute capability at least 60." << std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu/0
{ "file_path": "cutlass/examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu", "repo_id": "cutlass", "token_count": 9564 }
14
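Note on the row above (depthwise_simt_conv2dfprop.cu): `Options::output_size()` applies the usual convolution shape formula with the padding split across both sides, and `Options::gflops()` counts two flops per multiply-add. The Python sketch below reproduces that arithmetic for the example's default problem (taken from the `Options` constructor: input 1x128x128x32, 3x3 depthwise filter, padding 1, unit stride); it is only a worked numeric check, not part of the example.

# Defaults from Options: input 1x128x128x32, filter 32x3x3x1, padding (1,1,1,1), stride (1,1).
n, h, w, c = 1, 128, 128, 32
k, r, s, filter_c = 32, 3, 3, 1
pad_h0, pad_h1, pad_w0, pad_w1 = 1, 1, 1, 1
stride_h, stride_w = 1, 1

p = (h + pad_h0 + pad_h1 - r) // stride_h + 1      # output height, as in output_size()
q = (w + pad_w0 + pad_w1 - s) // stride_w + 1      # output width
print((n, p, q, k))                                 # (1, 128, 128, 32)

# gflops(): multiply-adds = NPQK * (R * S * filter_C), two flops each.
fmas = n * p * q * k * (r * s * filter_c)
print(2.0 * fmas / 1e9, "GFLOP per pass")           # ~0.085 GFLOP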
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cute/numeric/math.hpp" namespace example { // Naive grid-stride loop implementation of gather template<typename Element, typename Func> __global__ void gather_kernel(Element const * __restrict__ input, Element * __restrict__ output, Func func, int num_elems_input, int num_elems_output, cutlass::FastDivmod stride_divmod) { Element const * input_b = input + blockIdx.z * num_elems_input; Element * output_b = output + blockIdx.z * num_elems_output; int tidx = threadIdx.x + blockIdx.x * blockDim.x; for (int k = tidx; k < num_elems_output; k += blockDim.x * gridDim.x) { int i,j; stride_divmod(j, i, k); output_b[k] = input_b[i + func(j) * stride_divmod.divisor]; } } // Gather elements along strided dimension of the tensor according to given indices template<typename Element, typename Func> void gather(Element const * input, Element * output, Func func, int batch_size, int num_elems_input, int num_elems_output, int stride, cutlass::KernelHardwareInfo const& hw_info) { // Upcast to uint128_t data type int factor = 128 / cutlass::sizeof_bits<Element>::value; assert(stride % factor == 0); int stride_upcast = stride/factor; int num_elems_input_upcast = num_elems_input / factor; int num_elems_output_upcast = num_elems_output / factor; cutlass::FastDivmod stride_divmod(stride_upcast); dim3 blocks(hw_info.sm_count, 1, batch_size); gather_kernel<<<blocks, 1024>>>(reinterpret_cast<cute::uint128_t const *>(input), reinterpret_cast<cute::uint128_t *>(output), func, num_elems_input_upcast, num_elems_output_upcast, stride_divmod); } // Naive grid-stride loop implementation of scatter template<typename Element, typename Func> __global__ void scatter_kernel(Element const * __restrict__ input, Element * __restrict__ output, 
Func func, int num_elems_input, int num_elems_output, cutlass::FastDivmod stride_divmod) { Element const * input_b = input + blockIdx.z * num_elems_input; Element * output_b = output + blockIdx.z * num_elems_output; int tidx = threadIdx.x + blockIdx.x * blockDim.x; for (int k = tidx; k < num_elems_input; k += blockDim.x * gridDim.x) { int i,j; stride_divmod(j, i, k); output_b[i + func(j) * stride_divmod.divisor] = input_b[k]; } } // Gather elements along strided dimension of the tensor according to given indices template<typename Element, typename Func> void scatter(Element const * input, Element * output, Func func, int batch_size, int num_elems_input, int num_elems_output, int stride, cutlass::KernelHardwareInfo const& hw_info) { // Upcast to uint128_t data type int factor = 128 / cutlass::sizeof_bits<Element>::value; assert(stride % factor == 0); int stride_upcast = stride/factor; int num_elems_input_upcast = num_elems_input / factor; int num_elems_output_upcast = num_elems_output / factor; cutlass::FastDivmod stride_divmod(stride_upcast); dim3 blocks(hw_info.sm_count, 1, batch_size); scatter_kernel<<<blocks, 1024>>>(reinterpret_cast<cute::uint128_t const *>(input), reinterpret_cast<cute::uint128_t *>(output), func, num_elems_input_upcast, num_elems_output_upcast, stride_divmod); } } // namespace example
cutlass/examples/52_hopper_gather_scatter_fusion/gather_kernel.cuh/0
{ "file_path": "cutlass/examples/52_hopper_gather_scatter_fusion/gather_kernel.cuh", "repo_id": "cutlass", "token_count": 2200 }
15
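Note on the row above (gather_kernel.cuh): reading the kernel body, `stride_divmod(j, i, k)` splits the flat output index into a row index j = k / stride and an offset i = k % stride, so each output row j (a contiguous chunk of `stride` elements) is copied from input row `func(j)`; `scatter_kernel` is the inverse assignment. The NumPy sketch below is a reference rendering of that gather semantics for a single batch, with an illustrative index function; it ignores the uint128 upcasting the launcher does for bandwidth.

import numpy as np

# Reference for gather_kernel on one batch: with k = j * stride + i,
# output[k] = input[i + func(j) * stride], i.e. output row j is input row func(j)
# when both flat buffers are viewed as [num_rows, stride] matrices.
def gather_reference(inp, func, num_rows_out, stride):
    inp2d = inp.reshape(-1, stride)
    return np.stack([inp2d[func(j)] for j in range(num_rows_out)]).reshape(-1)

# Illustrative example: keep every other row of an 8-row input.
stride = 4
inp = np.arange(8 * stride, dtype=np.float32)
out = gather_reference(inp, lambda j: 2 * j, num_rows_out=4, stride=stride)
print(out.reshape(4, stride))   # rows 0, 2, 4 and 6 of the input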
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <cstdlib> #include <cstdio> #include <cassert> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "cutlass/util/helper_cuda.hpp" template <class ProblemShape, class CtaTiler, class TA, class AStride, class ASmemLayout, class TiledCopyA, class TB, class BStride, class BSmemLayout, class TiledCopyB, class TC, class CStride, class CSmemLayout, class TiledMma, class Alpha, class Beta> __global__ static __launch_bounds__(decltype(size(TiledMma{}))::value) void gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler, TA const* A, AStride dA, ASmemLayout sA_layout, TiledCopyA copy_a, TB const* B, BStride dB, BSmemLayout sB_layout, TiledCopyB copy_b, TC * C, CStride dC, CSmemLayout , TiledMma mma, Alpha alpha, Beta beta) { using namespace cute; // Preconditions CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K) CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K) CUTE_STATIC_ASSERT_V(size(copy_a) == size(mma)); // NumThreads CUTE_STATIC_ASSERT_V(size(copy_b) == size(mma)); // NumThreads static_assert(is_static<ASmemLayout>::value); static_assert(is_static<BSmemLayout>::value); static_assert(is_static<CSmemLayout>::value); CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == 
size<2>(cta_tiler)); // BLK_K CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN // // Full and Tiled Tensors // // Represent the full tensors Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K) Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K) Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N) // Get the appropriate blocks for this thread block auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k) Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k) Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k) Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N) // Shared memory buffers __shared__ TA smemA[cosize_v<ASmemLayout>]; __shared__ TB smemB[cosize_v<BSmemLayout>]; Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K) Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K) // // Partition the copying of A and B tiles across the threads // // TUTORIAL: Example of partitioning via a TiledCopy ThrCopy thr_copy_a = copy_a.get_slice(threadIdx.x); Tensor tAgA = thr_copy_a.partition_S(gA); // (CPY,CPY_M,CPY_K,k) Tensor tAsA = thr_copy_a.partition_D(sA); // (CPY,CPY_M,CPY_K) Tensor tArA = make_fragment_like(tAsA); // (CPY,CPY_M,CPY_K) ThrCopy thr_copy_b = copy_b.get_slice(threadIdx.x); Tensor tBgB = thr_copy_b.partition_S(gB); // (CPY,CPY_N,CPY_K,k) Tensor tBsB = thr_copy_b.partition_D(sB); // (CPY,CPY_N,CPY_K) Tensor tBrB = make_fragment_like(tBsB); // (CPY,CPY_N,CPY_K) CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // CPY_M CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tArA)); // CPY_M CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tAsA)); // CPY_K CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tArA)); // CPY_K CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // CPY_N CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBrB)); // CPY_N CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBsB)); // CPY_K CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBrB)); // CPY_K // Copy gmem to rmem for k_tile=0 copy(copy_a, tAgA(_,_,_,0), tArA); copy(copy_b, tBgB(_,_,_,0), tBrB); // // Define A/B partitioning and C accumulators // // TUTORIAL: Example of partitioning via a TiledMMA ThrMMA thr_mma = mma.get_slice(threadIdx.x); Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K) Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K) Tensor tCgC = thr_mma.partition_C(gC); // (MMA,MMA_M,MMA_N) // Allocate registers for pipelining Tensor tCrA = thr_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K) Tensor tCrB = thr_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K) // Allocate the accumulators -- same size as the projected data Tensor tCrC = thr_mma.make_fragment_C(tCgC); // (MMA,MMA_M,MMA_N) CUTE_STATIC_ASSERT_V( shape(tCrA) == shape(tCsA)); // (MMA,MMA_M,MMA_K) CUTE_STATIC_ASSERT_V( shape(tCrB) == shape(tCsB)); // (MMA,MMA_N,MMA_K) CUTE_STATIC_ASSERT_V( shape(tCrC) == shape(tCgC)); // (MMA,MMA_M,MMA_N) CUTE_STATIC_ASSERT_V(size<1>(tCgC) == size<1>(tCsA)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(tCgC) == size<1>(tCsB)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // MMA_K // Clear the accumulators clear(tCrC); #if 0 if(thread0()) { print(" mA : 
"); print( mA); print("\n"); print(" gA : "); print( gA); print("\n"); print(" sA : "); print( sA); print("\n"); print("tAgA : "); print(tAgA); print("\n"); print("tAsA : "); print(tAsA); print("\n"); print("tArA : "); print(tArA); print("\n"); } #endif #if 0 if(thread0()) { print(" mB : "); print( mB); print("\n"); print(" gB : "); print( gB); print("\n"); print(" sB : "); print( sB); print("\n"); print("tBgB : "); print(tBgB); print("\n"); print("tBsB : "); print(tBsB); print("\n"); print("tArA : "); print(tArA); print("\n"); } #endif #if 0 if(thread0()) { print(" mC : "); print( mC); print("\n"); print(" gC : "); print( gC); print("\n"); print("tCsA : "); print(tCsA); print("\n"); print("tCsB : "); print(tCsB); print("\n"); print("tCgC : "); print(tCgC); print("\n"); print("tCrC : "); print(tCrC); print("\n"); } #endif #if 1 // Copy rmem to smem copy(tArA, tAsA); copy(tBrB, tBsB); __syncthreads(); // // PIPELINED MAIN LOOP // TUTORIAL: Example of a gemm loop that pipelines shared memory AND register memory // Data is read from global to registers, then to shared via the tA|tB partitions // Data is then copied from shared to registers in multiple waves via the tC partitions // and gemm(.) operates on the current register wave // // Load A, B shmem->regs for k_block=0 copy(tCsA(_,_,0), tCrA(_,_,0)); copy(tCsB(_,_,0), tCrB(_,_,0)); auto K_TILE_MAX = size<3>(tAgA); auto K_BLOCK_MAX = size<2>(tCrA); CUTE_NO_UNROLL for (int k_tile = 0; k_tile < K_TILE_MAX; ++k_tile) { // Pipeline the k-mode of the block registers CUTE_UNROLL for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) { if (k_block == K_BLOCK_MAX - 1) { // Copy rmem to smem __syncthreads(); copy(tArA, tAsA); copy(tBrB, tBsB); __syncthreads(); } // Copy smem to rmem for k_block+1 int k_block_next = (k_block + 1) % K_BLOCK_MAX; copy(tCsA(_,_,k_block_next), tCrA(_,_,k_block_next)); copy(tCsB(_,_,k_block_next), tCrB(_,_,k_block_next)); if (k_block == 0) { // Copy gmem to rmem for k_tile+1 int k_tile_next = (k_tile + 1 < K_TILE_MAX) ? 
k_tile + 1 : k_tile; copy(copy_a, tAgA(_,_,_,k_tile_next), tArA); copy(copy_b, tBgB(_,_,_,k_tile_next), tBrB); } // Thread-level register gemm for k_block gemm(mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC); } // k_block } // k_tile #endif // // Epilogue // axpby(alpha, tCrC, beta, tCgC); } // Setup params for a NT GEMM template <class TA, class TB, class TC, class Alpha, class Beta> void gemm_nt(int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { using namespace cute; // Define shapes (dynamic) auto M = int(m); auto N = int(n); auto K = int(k); auto prob_shape = make_shape(M, N, K); // (M, N, K) // Define NT strides (mixed) auto dA = make_stride(Int<1>{}, ldA); // (dM, dK) auto dB = make_stride(Int<1>{}, ldB); // (dN, dK) auto dC = make_stride(Int<1>{}, ldC); // (dM, dN) // Define CTA tile sizes (static) auto bM = Int<128>{}; auto bN = Int<128>{}; auto bK = Int< 8>{}; auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K) // Define the smem layouts (static) auto sA = make_layout(make_shape(bM, bK)); // (m,k) -> smem_idx; m-major auto sB = make_layout(make_shape(bN, bK)); // (n,k) -> smem_idx; n-major auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major // Define the thread layouts (static) TiledCopy copyA = make_tiled_copy(Copy_Atom<UniversalCopy<uint128_t>, TA>{}, Layout<Shape<_32,_8>>{}, // Thr layout 32x8 m-major Layout<Shape< _4,_1>>{}); // Val layout 4x1 m-major TiledCopy copyB = make_tiled_copy(Copy_Atom<UniversalCopy<uint128_t>, TB>{}, Layout<Shape<_32,_8>>{}, // Thr layout 32x8 n-major Layout<Shape< _4,_1>>{}); // Val layout 4x1 n-major TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{}, Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA #if 0 print(copyA); print(copyB); print(mmaC); #endif #if 0 print_latex(copyA); print_latex(copyB); print_latex(mmaC); #endif dim3 dimBlock(size(mmaC)); dim3 dimGrid(size(ceil_div(M, bM)), size(ceil_div(N, bN))); gemm_device<<<dimGrid, dimBlock, 0, stream>>> (prob_shape, cta_tiler, A, dA, sA, copyA, B, dB, sB, copyB, C, dC, sC, mmaC, alpha, beta); } // Setup params for a TN GEMM template <class TA, class TB, class TC, class Alpha, class Beta> void gemm_tn(int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { using namespace cute; // Define shapes (dynamic) auto M = int(m); auto N = int(n); auto K = int(k); auto prob_shape = make_shape(M, N, K); // (M, N, K) // Define TN strides (mixed) auto dA = make_stride(ldA, Int<1>{}); // (dM, dK) auto dB = make_stride(ldB, Int<1>{}); // (dN, dK) auto dC = make_stride(Int<1>{}, ldC); // (dM, dN) // Define CTA tile sizes (static) auto bM = Int<128>{}; auto bN = Int<128>{}; auto bK = Int< 8>{}; auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K) // Define the smem layouts (static) auto sA = make_layout(make_shape ( bM, bK), make_stride(Int<1>{}, bM+Int<1>{})); // (m,k) -> smem_idx; padded m-major auto sB = make_layout(make_shape ( bN, bK), make_stride(Int<1>{}, bN+Int<1>{})); // (n,k) -> smem_idx; padded n-major auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx // Define the thread layouts (static) TiledCopy copyA = make_tiled_copy(Copy_Atom<UniversalCopy<TA>, TA>{}, Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major Layout<Shape< _1,_1>>{}); // Val layout 1x1 TiledCopy copyB = make_tiled_copy(Copy_Atom<UniversalCopy<TB>, TB>{}, Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr 
layout 32x8 k-major Layout<Shape< _1,_1>>{}); // Val layout 1x1 TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{}, Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA #if 0 print(copyA); print(copyB); print(mmaC); #endif #if 0 print_latex(copyA); print_latex(copyB); print_latex(mmaC); #endif dim3 dimBlock(size(mmaC)); dim3 dimGrid(size(ceil_div(M, bM)), size(ceil_div(N, bN))); gemm_device<<<dimGrid, dimBlock, 0, stream>>> (prob_shape, cta_tiler, A, dA, sA, copyA, B, dB, sB, copyB, C, dC, sC, mmaC, alpha, beta); } template <class TA, class TB, class TC, class Alpha, class Beta> void gemm(char transA, char transB, int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { if (transA == 'N' && transB == 'T') { return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream); } else if (transA == 'T' && transB == 'N') { return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream); } assert(false && "Not implemented"); } int main(int argc, char** argv) { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major < 7) { std::cout << "This example requires an Volta GPU or newer (CC >= 70)" << std::endl; // Return 0 so tests pass if run on unsupported architectures or CUDA Toolkits. return 0; } int m = 5120; if (argc >= 2) sscanf(argv[1], "%d", &m); int n = 5120; if (argc >= 3) sscanf(argv[2], "%d", &n); int k = 4096; if (argc >= 4) sscanf(argv[3], "%d", &k); char transA = 'N'; if (argc >= 5) sscanf(argv[4], "%c", &transA); char transB = 'T'; if (argc >= 6) sscanf(argv[5], "%c", &transB); using TA = float; using TB = float; using TC = float; using TI = float; TI alpha = 1.0; TI beta = 0.0; std::cout << "M = " << m << std::endl; std::cout << "N = " << n << std::endl; std::cout << "K = " << k << std::endl; std::cout << "C = A^" << transA << " B^" << transB << std::endl; thrust::host_vector<TA> h_A(m*k); thrust::host_vector<TB> h_B(n*k); thrust::host_vector<TC> h_C(m*n); for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 ); for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 ); for (int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1); thrust::device_vector<TA> d_A = h_A; thrust::device_vector<TB> d_B = h_B; thrust::device_vector<TC> d_C = h_C; double gflops = (2.0*m*n*k) * 1e-9; const int timing_iterations = 100; GPU_Clock timer; int ldA = 0, ldB = 0, ldC = m; if (transA == 'N') { ldA = m; } else if (transA == 'T') { ldA = k; } else { assert(false); } if (transB == 'N') { ldB = k; } else if (transB == 'T') { ldB = n; } else { assert(false); } // Run once d_C = h_C; gemm(transA, transB, m, n, k, alpha, d_A.data().get(), ldA, d_B.data().get(), ldB, beta, d_C.data().get(), ldC); CUTE_CHECK_LAST(); thrust::host_vector<TC> cute_result = d_C; // Timing iterations timer.start(); for (int i = 0; i < timing_iterations; ++i) { gemm(transA, transB, m, n, k, alpha, d_A.data().get(), ldA, d_B.data().get(), ldB, beta, d_C.data().get(), ldC); } double cute_time = timer.seconds() / timing_iterations; CUTE_CHECK_LAST(); printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000); return 0; }
cutlass/examples/cute/tutorial/sgemm_sm70.cu/0
{ "file_path": "cutlass/examples/cute/tutorial/sgemm_sm70.cu", "repo_id": "cutlass", "token_count": 9373 }
16
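Note on the row above (sgemm_sm70.cu): the tutorial times the kernel but never verifies `cute_result`. For the default NT path (transA='N', transB='T'), the strides `dA = (1, ldA)`, `dB = (1, ldB)`, `dC = (1, ldC)` mean A, B and C are stored column-major as MxK, NxK and MxN with ldA = m, ldB = n, ldC = m, so a host-side check can be written as the NumPy sketch below. The column-major reshapes are one reading of `gemm_nt`'s stride setup, and the sizes are small illustrative ones rather than the 5120x5120x4096 defaults.

import numpy as np

# Reference for the default NT path: a, b, c are the flat host buffers.
# A[i, j] = a[i + j * m], B[i, j] = b[i + j * n], C[i, j] = c[i + j * m].
def sgemm_nt_reference(m, n, k, alpha, a, b, beta, c):
    A = a.reshape(k, m).T          # column-major m x k
    B = b.reshape(k, n).T          # column-major n x k
    C = c.reshape(n, m).T          # column-major m x n
    return alpha * (A @ B.T) + beta * C

m, n, k = 8, 6, 4                  # small assumed sizes for illustration
a = np.random.rand(m * k).astype(np.float32)
b = np.random.rand(n * k).astype(np.float32)
c = np.full(m * n, -1.0, dtype=np.float32)
ref = sgemm_nt_reference(m, n, k, 1.0, a, b, 0.0, c)
# ref.flatten(order="F") lines up element-for-element with the column-major cute_result buffer.
print(ref.shape)                   # (8, 6)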