text
stringlengths 27
947k
| id
stringlengths 18
126
| metadata
dict | __index_level_0__
int64 0
80
|
---|---|---|---|
# Package configuration entry point for NvidiaCutlass.
# Resolve the directory containing this config file so the exported targets
# file can be located relative to the install tree.
get_filename_component(NvidiaCutlass_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)

include(CMakeFindDependencyMacro)

# Guard: if the imported target already exists (e.g. this config was included
# twice, or the project is built in-tree), do not redefine it.
if(TARGET nvidia::cutlass::CUTLASS)
  return()
endif()

# Pull in the exported target definitions generated at install time.
include("${NvidiaCutlass_CMAKE_DIR}/NvidiaCutlassTargets.cmake")
| cutlass/cmake/NvidiaCutlassConfig.cmake.in/0 | {
"file_path": "cutlass/cmake/NvidiaCutlassConfig.cmake.in",
"repo_id": "cutlass",
"token_count": 97
} | 0 |
var searchData=
[
['quiet_5fnan',['quiet_NaN',['../structstd_1_1numeric__limits_3_01cutlass_1_1half__t_01_4.html#a8c7eafdd3b121353c0914dc6e1c0d108',1,'std::numeric_limits< cutlass::half_t >']]]
];
| cutlass/docs/search/all_10.js/0 | {
"file_path": "cutlass/docs/search/all_10.js",
"repo_id": "cutlass",
"token_count": 103
} | 1 |
// Doxygen-generated search index fragment: maps the lower-cased search key to
// the display name and documentation anchor. Do not edit by hand.
var searchData=
[
  ['yes',['yes',['../structcutlass_1_1platform_1_1is__base__of__helper.html#ac1cf3f804e7686213fd42c678cc6d669',1,'cutlass::platform::is_base_of_helper']]]
];
| cutlass/docs/search/all_18.js/0 | {
"file_path": "cutlass/docs/search/all_18.js",
"repo_id": "cutlass",
"token_count": 79
} | 2 |
// Doxygen-generated namespace search index. Each entry maps a namespace name to
// one or more documentation pages (the same short name, e.g. 'detail' or
// 'thread', appears once per enclosing namespace). Do not edit by hand.
var searchData=
[
  ['arch',['arch',['../namespacecutlass_1_1arch.html',1,'cutlass']]],
  ['cutlass',['cutlass',['../namespacecutlass.html',1,'']]],
  ['debug',['debug',['../namespacecutlass_1_1debug.html',1,'cutlass']]],
  ['detail',['detail',['../namespacecutlass_1_1detail.html',1,'cutlass']]],
  ['detail',['detail',['../namespacecutlass_1_1gemm_1_1thread_1_1detail.html',1,'cutlass::gemm::thread']]],
  ['detail',['detail',['../namespacecutlass_1_1reference_1_1host_1_1detail.html',1,'cutlass::reference::host']]],
  ['detail',['detail',['../namespacecutlass_1_1epilogue_1_1threadblock_1_1detail.html',1,'cutlass::epilogue::threadblock']]],
  ['detail',['detail',['../namespacecutlass_1_1gemm_1_1threadblock_1_1detail.html',1,'cutlass::gemm::threadblock']]],
  ['detail',['detail',['../namespacecutlass_1_1reference_1_1detail.html',1,'cutlass::reference']]],
  ['detail',['detail',['../namespacecutlass_1_1gemm_1_1kernel_1_1detail.html',1,'cutlass::gemm::kernel']]],
  ['detail',['detail',['../namespacecutlass_1_1reference_1_1device_1_1detail.html',1,'cutlass::reference::device']]],
  ['detail',['detail',['../namespacecutlass_1_1reference_1_1device_1_1kernel_1_1detail.html',1,'cutlass::reference::device::kernel']]],
  ['device',['device',['../namespacecutlass_1_1gemm_1_1device.html',1,'cutlass::gemm']]],
  ['device',['device',['../namespacecutlass_1_1reference_1_1device.html',1,'cutlass::reference']]],
  ['device_5fmemory',['device_memory',['../namespacecutlass_1_1device__memory.html',1,'cutlass']]],
  ['epilogue',['epilogue',['../namespacecutlass_1_1epilogue.html',1,'cutlass']]],
  ['gemm',['gemm',['../namespacecutlass_1_1gemm.html',1,'cutlass']]],
  ['host',['host',['../namespacecutlass_1_1reference_1_1host.html',1,'cutlass::reference']]],
  ['kernel',['kernel',['../namespacecutlass_1_1reduction_1_1kernel.html',1,'cutlass::reduction']]],
  ['kernel',['kernel',['../namespacecutlass_1_1gemm_1_1kernel.html',1,'cutlass::gemm']]],
  ['kernel',['kernel',['../namespacecutlass_1_1reference_1_1device_1_1kernel.html',1,'cutlass::reference::device']]],
  ['layout',['layout',['../namespacecutlass_1_1layout.html',1,'cutlass']]],
  ['library',['library',['../namespacecutlass_1_1library.html',1,'cutlass']]],
  ['platform',['platform',['../namespacecutlass_1_1platform.html',1,'cutlass']]],
  ['reduction',['reduction',['../namespacecutlass_1_1reduction.html',1,'cutlass']]],
  ['reference',['reference',['../namespacecutlass_1_1reference.html',1,'cutlass']]],
  ['thread',['thread',['../namespacecutlass_1_1gemm_1_1thread.html',1,'cutlass::gemm']]],
  ['thread',['thread',['../namespacecutlass_1_1reference_1_1device_1_1thread.html',1,'cutlass::reference::device']]],
  ['thread',['thread',['../namespacecutlass_1_1thread.html',1,'cutlass']]],
  ['thread',['thread',['../namespacecutlass_1_1reduction_1_1thread.html',1,'cutlass::reduction']]],
  ['thread',['thread',['../namespacecutlass_1_1epilogue_1_1thread.html',1,'cutlass::epilogue']]],
  ['thread',['thread',['../namespacecutlass_1_1transform_1_1thread.html',1,'cutlass::transform']]],
  ['threadblock',['threadblock',['../namespacecutlass_1_1epilogue_1_1threadblock.html',1,'cutlass::epilogue']]],
  ['threadblock',['threadblock',['../namespacecutlass_1_1gemm_1_1threadblock.html',1,'cutlass::gemm']]],
  ['threadblock',['threadblock',['../namespacecutlass_1_1transform_1_1threadblock.html',1,'cutlass::transform']]],
  ['transform',['transform',['../namespacecutlass_1_1transform.html',1,'cutlass']]],
  ['warp',['warp',['../namespacecutlass_1_1epilogue_1_1warp.html',1,'cutlass::epilogue']]],
  ['warp',['warp',['../namespacecutlass_1_1gemm_1_1warp.html',1,'cutlass::gemm']]]
];
| cutlass/docs/search/namespaces_0.js/0 | {
"file_path": "cutlass/docs/search/namespaces_0.js",
"repo_id": "cutlass",
"token_count": 1418
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to use the PredicatedTileIterator in CUTLASS to load data from
addressable memory, and then store it back into addressable memory.
TileIterator is a core concept in CUTLASS that enables efficient loading and storing of data to
and from addressable memory. The PredicateTileIterator accepts a ThreadMap type, which defines
the mapping of threads to a "tile" in memory. This separation of concerns enables user-defined
thread mappings to be specified.
In this example, a PredicatedTileIterator is used to load elements from a tile in global memory,
stored in column-major layout, into a fragment and then back into global memory in the same
layout.
This example uses CUTLASS utilities to ease the matrix operations.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// CUTLASS includes
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::reference::host::TensorFill() and
// cutlass::reference::host::TensorFillBlockSequential()
#include "cutlass/util/reference/host/tensor_fill.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define PredicatedTileIterators to load and store a M-by-K tile, in column major layout.
/// Device kernel that copies one tile from src to dst using a pair of
/// PredicatedTileIterators: one loads a fragment from the source tile, the
/// other stores that fragment into the destination tile.
///
/// `Iterator` is expected to be a PredicatedTileIterator specialization; its
/// `Params` carry precomputed layout information and `extent` bounds the
/// logically valid region (out-of-bounds accesses are masked by predicates).
template <typename Iterator>
__global__ void copy(
    typename Iterator::Params dst_params,    // layout parameters for the destination tile
    typename Iterator::Element *dst_pointer, // base pointer of the destination tile
    typename Iterator::Params src_params,    // layout parameters for the source tile
    typename Iterator::Element *src_pointer, // base pointer of the source tile
    cutlass::Coord<2> extent) {              // valid extent (contiguous, strided)

  Iterator dst_iterator(dst_params, dst_pointer, extent, threadIdx.x);
  Iterator src_iterator(src_params, src_pointer, extent, threadIdx.x);

  // PredicatedTileIterator uses PitchLinear layout and therefore takes in a PitchLinearShape.
  // The contiguous dimension can be accessed via Iterator::Shape::kContiguous and the strided
  // dimension can be accessed via Iterator::Shape::kStrided
  //
  // Number of whole-tile steps needed to cover extent[1] (ceiling division).
  int iterations = (extent[1] + Iterator::Shape::kStrided - 1) / Iterator::Shape::kStrided;

  typename Iterator::Fragment fragment;

  // Zero-initialize the per-thread fragment before the first load.
  for(size_t i = 0; i < fragment.size(); ++i) {
    fragment[i] = 0;
  }

  // First iteration is peeled out of the loop; the remaining iterations run
  // below. Incrementing an iterator advances it to the next tile in the
  // strided dimension, so load/store must precede the increments.
  src_iterator.load(fragment);
  dst_iterator.store(fragment);

  ++src_iterator;
  ++dst_iterator;

  for(; iterations > 1; --iterations) {

    src_iterator.load(fragment);
    dst_iterator.store(fragment);

    ++src_iterator;
    ++dst_iterator;
  }
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Initializes the source tile with sequentially increasing values and performs the copy into
// the destination tile using two PredicatedTileIterators, one to load the data from addressable
// memory into a fragment (regiser-backed array of elements owned by each thread) and another to
// store the data from the fragment back into the addressable memory of the destination tile.
/// Initializes the source tile with sequentially increasing values and performs
/// the copy into the destination tile using two PredicatedTileIterators, then
/// verifies the result on the host.
///
/// @param M extent of the contiguous dimension of the copied region
/// @param K extent of the strided dimension of the copied region
/// @return cudaSuccess on success; a CUDA error code on launch/execution
///         failure, or cudaErrorUnknown on a verification mismatch.
cudaError_t TestTileIterator(int M, int K) {

  // For this example, we chose a <64, 4> tile shape. The PredicateTileIterator expects
  // PitchLinearShape and PitchLinear layout.
  using Shape = cutlass::layout::PitchLinearShape<64, 4>;
  using Layout = cutlass::layout::PitchLinear;
  using Element = int;
  int const kThreads = 32;

  // ThreadMaps define how threads are mapped to a given tile. The PitchLinearStripminedThreadMap
  // stripmines a pitch-linear tile among a given number of threads, first along the contiguous
  // dimension then along the strided dimension.
  using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<Shape, kThreads>;

  // Define the PredicateTileIterator, using TileShape, Element, Layout, and ThreadMap types
  using Iterator = cutlass::transform::threadblock::PredicatedTileIterator<
      Shape, Element, Layout, 1, ThreadMap>;

  cutlass::Coord<2> copy_extent = cutlass::make_Coord(M, K);
  cutlass::Coord<2> alloc_extent = cutlass::make_Coord(M, K);

  // Allocate source and destination tensors
  cutlass::HostTensor<Element, Layout> src_tensor(alloc_extent);
  cutlass::HostTensor<Element, Layout> dst_tensor(alloc_extent);

  Element oob_value = Element(-1);

  // Initialize destination tensor with all -1s
  cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);

  // Initialize source tensor with sequentially increasing values
  cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());

  dst_tensor.sync_device();
  src_tensor.sync_device();

  typename Iterator::Params dst_params(dst_tensor.layout());
  typename Iterator::Params src_params(src_tensor.layout());

  dim3 block(kThreads, 1);
  dim3 grid(1, 1);

  // Launch copy kernel to perform the copy
  copy<Iterator><<< grid, block >>>(
      dst_params,
      dst_tensor.device_data(),
      src_params,
      src_tensor.device_data(),
      copy_extent
  );

  cudaError_t result = cudaGetLastError();
  if(result != cudaSuccess) {
    std::cerr << "Error - kernel failed." << std::endl;
    return result;
  }

  // cudaGetLastError() only reports launch-time failures. Synchronize so that
  // asynchronous execution errors (e.g. illegal memory access) are surfaced
  // before we read back and verify the results.
  result = cudaDeviceSynchronize();
  if(result != cudaSuccess) {
    std::cerr << "Error - kernel execution failed: "
              << cudaGetErrorString(result) << std::endl;
    return result;
  }

  dst_tensor.sync_host();

  // Verify results: inside the copy extent the destination must equal the
  // source; outside it the initial fill value (-1) must be untouched.
  for(int s = 0; s < alloc_extent[1]; ++s) {
    for(int c = 0; c < alloc_extent[0]; ++c) {

      Element expected = Element(0);

      if(c < copy_extent[0] && s < copy_extent[1]) {
        expected = src_tensor.at({c, s});
      }
      else {
        expected = oob_value;
      }

      Element got = dst_tensor.at({c, s});
      bool equal = (expected == got);

      if(!equal) {
        std::cerr << "Error - source tile differs from destination tile." << std::endl;
        return cudaErrorUnknown;
      }
    }
  }

  return cudaSuccess;
}
int main(int argc, const char *arg[]) {
cudaError_t result = TestTileIterator(57, 35);
if(result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit
return result == cudaSuccess ? 0 : -1;
}
| cutlass/examples/04_tile_iterator/tile_iterator.cu/0 | {
"file_path": "cutlass/examples/04_tile_iterator/tile_iterator.cu",
"repo_id": "cutlass",
"token_count": 2658
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
*/
#include <algorithm>
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float;                   // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator;  // <- data type of epilogue operations
using ElementInputA = cutlass::half_t;              // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t;              // <- data type of elements in input matrix B
using ElementOutput = float;                        // <- data type of elements in output matrix D

// Note that if the output is column major, the bias has to be per row. i.e. every row has different bias.
// If the output is row major, the bias has to be per column, i.e. every column has different bias.
// Below list some other notices:
//
// Note this example only works for ColumnMajor output because
//   1) we only have row major epilogue.
//   2) we swap A and B if the output is column major then we can still use the
//      row major epilogue.
//   3) Mx1 bias vector becomes 1xM after the swapping/transposing.
//   4) we can use the existing OutputIterator to load 1xM bias vector.
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::ColumnMajor;

// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;

// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm75;

// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
    cutlass::gemm::GemmShape<128, 128, 32>;  // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>;  // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>;  // <- MMA Op tile M = 16, N = 8, K = 8

// This code section describes how threadblocks are scheduled on GPU.
// The identity swizzle maps threadblock indices to tiles without reordering
// beyond the default CUTLASS identity mapping.
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;

// Define the epilogue operation as LinearCombinationRelu. This is approximately equal to
//
//    d_ij = max(0, alpha * sum_k(a_ik * b_kj) + c_ij )
//
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
    ElementOutput,                                        // <- data type of output matrix
    128 / cutlass::sizeof_bits<ElementOutput>::value,     // <- this is the number of elements per
                                                          // vectorized memory access. For half
                                                          // precision, it's 8 elements. This becomes
                                                          // the vector width of math instructions in
                                                          // epilogue too
    ElementAccumulator,                                   // <- data type of accumulator
    ElementComputeEpilogue,                               // <- data type for alpha in linear combination function
    cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // <- alpha x C + bias

// Number of pipelines you want to use
constexpr int NumStages = 2;

// The fully-configured device-level GEMM type assembled from the choices above.
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
                                         LayoutInputA,
                                         ElementInputB,
                                         LayoutInputB,
                                         ElementOutput,
                                         LayoutOutput,
                                         ElementAccumulator,
                                         MMAOp,
                                         SmArch,
                                         ShapeMMAThreadBlock,
                                         ShapeMMAWarp,
                                         ShapeMMAOp,
                                         EpilogueOp,
                                         SwizzleThreadBlock,
                                         NumStages>;
/// Runs the GEMM + bias + ReLU example: computes D = relu(alpha * A*B + bias)
/// with the CUTLASS device kernel, recomputes the same result with the device
/// reference GEMM plus a host-side bias+relu pass, and compares the two.
///
/// @return 0 if the CUTLASS output matches the reference, -1 on mismatch.
///         (Previously this function returned 0 unconditionally, so a
///         numerical mismatch still produced a successful exit code from
///         main; the result is now propagated.)
int run() {
  const int length_m = 5120;
  const int length_n = 4096;
  const int length_k = 4096;

  // Create a tuple of problem size for matrix multiplication
  cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);

  // Initialize tensors using CUTLASS helper functions
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
      problem_size.mk());  // <- Create matrix A with dimensions M x K
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
      problem_size.kn());  // <- Create matrix B with dimensions K x N
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias(
      {problem_size.m(), 1});  // <- Create matrix C with dimensions M x 1 (the bias vector)
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           // CUTLASS kernel
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           // reference kernel

  // Fill input and output matrices on host using CUTLASS helper functions
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_a.host_view(),
      1,
      ElementInputA(4),
      ElementInputA(-4),
      0);  // <- Fill matrix A on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b.host_view(),
      1,
      ElementInputB(4),
      ElementInputB(-4),
      0);  // <- Fill matrix B on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_c_bias.host_view(),
      1,
      ElementOutput(4),
      ElementOutput(-4),
      0);  // <- Fill matrix C on host with uniform-distribution random data
  cutlass::reference::host::TensorFill(
      tensor_d.host_view());  // <- fill matrix D on host with zeros
  cutlass::reference::host::TensorFill(
      tensor_ref_d.host_view());  // <- fill matrix D for reference on host with zeros

  // Copy data from host to GPU
  tensor_a.sync_device();
  tensor_b.sync_device();
  tensor_c_bias.sync_device();
  tensor_d.sync_device();
  tensor_ref_d.sync_device();

  // Initialize alpha for dot product computation
  ElementComputeEpilogue alpha = ElementComputeEpilogue(1);

  // Split K dimension into 1 partitions
  int split_k_slices = 1;

  // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
  // instantiated CUTLASS kernel
  typename Gemm::Arguments arguments{
      problem_size,                     // <- problem size of matrix multiplication
      tensor_a.device_ref(),            // <- reference to matrix A on device
      tensor_b.device_ref(),            // <- reference to matrix B on device
      {tensor_c_bias.device_data(), 0}, // <- the C matrix is treated as the bias vector. We can enable the GEMM
                                        //    to project away the N dimension by setting the stride to zero.
      tensor_d.device_ref(),            // <- reference to matrix D on device
      {alpha},                          // <- alpha
      split_k_slices};                  // <- k-dimension split factor

  // Using the arguments, query for extra workspace required for matrix multiplication computation
  size_t workspace_size = Gemm::get_workspace_size(arguments);

  // Allocate workspace memory
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

  // Instantiate CUTLASS kernel depending on templates
  Gemm gemm_op;

  // Check the problem size is supported or not
  cutlass::Status status = gemm_op.can_implement(arguments);
  CUTLASS_CHECK(status);

  // Initialize CUTLASS kernel with arguments and workspace pointer
  status = gemm_op.initialize(arguments, workspace.get());
  CUTLASS_CHECK(status);

  // Launch initialized CUTLASS kernel
  status = gemm_op();
  CUTLASS_CHECK(status);

  //
  // Create instantiation for device reference gemm kernel
  //
  cutlass::reference::device::Gemm<ElementInputA,
                                   LayoutInputA,
                                   ElementInputB,
                                   LayoutInputB,
                                   ElementOutput,
                                   LayoutOutput,
                                   ElementComputeEpilogue,
                                   ElementComputeEpilogue>
      gemm_device_reference;

  // Launch device reference to compute strictly the product A * B (beta = 0,
  // so the bias term is intentionally excluded here and applied on the host).
  gemm_device_reference(
      problem_size,
      alpha,
      tensor_a.device_ref(),
      tensor_b.device_ref(),
      0,
      tensor_ref_d.device_ref());

  // Wait for kernels to finish
  cudaDeviceSynchronize();

  // Copy output data from CUTLASS and reference kernel to host for comparison
  tensor_d.sync_host();
  tensor_ref_d.sync_host();

  // Compute bias + relu in host code so the reference matches the fused epilogue
  for (int i = 0; i < problem_size.m(); ++i) {
    for (int j = 0; j < problem_size.n(); ++j) {
      tensor_ref_d.at({i, j}) = std::max(
          ElementOutput(0),
          ElementOutput(tensor_ref_d.at({i, j}) + tensor_c_bias.at({i, 0}))
      );
    }
  }

  // Check if output from CUTLASS kernel and reference kernel are equal or not
  bool const passed = cutlass::reference::host::TensorEquals(tensor_d.host_view(),
                                                             tensor_ref_d.host_view());

  std::cout << (passed ? "Passed" : "Failed") << std::endl;

  CUTLASS_CHECK(status);

  // Propagate the verification result so a mismatch is reflected in the exit code.
  return passed ? 0 : -1;
}
int main() {
bool notSupported = false;
// Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!(props.major * 10 + props.minor >= 75)) {
std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
return run();
}
| cutlass/examples/12_gemm_bias_relu/gemm_bias_relu.cu/0 | {
"file_path": "cutlass/examples/12_gemm_bias_relu/gemm_bias_relu.cu",
"repo_id": "cutlass",
"token_count": 5210
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "device/b2b_implicit_gemm_convolution.h"
#include "b2b_interleaved_conv2d_run.h"
#include "test_run.h"
////////////////////////////////////////////////////////////////////////////////
// Problem size of the first convolution in the back-to-back pair:
// a 3x3 filter over a 56x56x64 activation, same output spatial size.
cutlass::conv::Conv2dProblemSize conv2d_s8_sm80_problem_size_0 (
    {32, 56, 56, 64},    // input size  (NHWC)
    {64, 3, 3, 64},      // filter size (KRSC)
    {1, 1, 1, 1},        // padding (pad_h, _, pad_w, _)
    {1, 1},              // stride (stride_h, stride_w)
    {1, 1},              // dilation (dilation_h, dilation_w)
    {32, 56, 56, 64}     // output size (NPQK)
);

// Problem size of the second convolution: a 1x1 filter consuming the output
// of the first (same NPQ, channels expand 64 -> 128), so the pair can be fused.
cutlass::conv::Conv2dProblemSize conv2d_s8_sm80_problem_size_1 (
    {32, 56, 56, 64},    // input size  (NHWC)
    {128, 1, 1, 64},     // filter size (KRSC)
    {0, 0, 0, 0},        // padding (pad_h, _, pad_w, _)
    {1, 1},              // stride (stride_h, stride_w)
    {1, 1},              // dilation (dilation_h, dilation_w)
    {32, 56, 56, 128}    // output size (NPQK)
);
/// Baseline: runs the two INT8 interleaved convolutions back-to-back as two
/// separate (non-fused) kernels, for comparison against the fused variant.
/// Returns true on success as reported by the test harness.
bool run_nonfused_conv2d_fprop_optimized_s8_sm80() {
  using ElementA           = int8_t;
  using ElementB           = int8_t;
  using ElementC           = int8_t;
  using ElementAccumulator = int32_t;
  using ElementCompute     = float;

  ElementCompute alpha0 = ElementCompute(1);
  ElementCompute beta0 = ElementCompute(1); //beta=1 for bias
  ElementCompute alpha1 = ElementCompute(1);
  ElementCompute beta1 = ElementCompute(1); //beta=1 for bias

  // Independent tile shapes for each of the two convolutions.
  using ThreadblockShape0 = cutlass::gemm::GemmShape<128, 64, 64>;
  using WarpShape0 = cutlass::gemm::GemmShape<64, 64, 64>;
  using ThreadblockShape1 = cutlass::gemm::GemmShape<128, 128, 64>;
  using WarpShape1 = cutlass::gemm::GemmShape<64, 64, 64>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;

  // First convolution: INT8 tensor-op fprop with a fused ReLU epilogue.
  using Conv2dFpropKernel0 = typename cutlass::conv::kernel::DefaultConv2dFprop<
    ElementA, cutlass::layout::TensorNCxHWx<32>,
    ElementB, cutlass::layout::TensorCxRSKx<32>,
    ElementC, cutlass::layout::TensorNCxHWx<32>,
    ElementAccumulator,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    ThreadblockShape0,
    WarpShape0,
    InstructionShape,
    cutlass::epilogue::thread::LinearCombinationRelu<
      ElementC,
      64 / cutlass::sizeof_bits<ElementC>::value,
      ElementAccumulator,
      ElementCompute,
      cutlass::epilogue::thread::ScaleType::NoBetaScaling
    >,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
    3,
    cutlass::arch::OpMultiplyAddSaturate,
    cutlass::conv::IteratorAlgorithm::kOptimized
  >::Kernel;

  using Conv2dFprop0 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel0>;

  // Second convolution: same configuration except for the larger tile shapes.
  using Conv2dFpropKernel1 = typename cutlass::conv::kernel::DefaultConv2dFprop<
    ElementA, cutlass::layout::TensorNCxHWx<32>,
    ElementB, cutlass::layout::TensorCxRSKx<32>,
    ElementC, cutlass::layout::TensorNCxHWx<32>,
    ElementAccumulator,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    ThreadblockShape1,
    WarpShape1,
    InstructionShape,
    cutlass::epilogue::thread::LinearCombinationRelu<
      ElementC,
      64 / cutlass::sizeof_bits<ElementC>::value,
      ElementAccumulator,
      ElementCompute,
      cutlass::epilogue::thread::ScaleType::NoBetaScaling
    >,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
    3,
    cutlass::arch::OpMultiplyAddSaturate,
    cutlass::conv::IteratorAlgorithm::kOptimized
  >::Kernel;

  using Conv2dFprop1 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel1>;

  // Run both convolutions through the shared test harness (32-element interleave).
  B2bInterleavedNonFusedConv2dRun<Conv2dFprop0, Conv2dFprop1, 32> nonFusedConv2d;

  std::cout << "Running Non-fused back-to-back INT8 interleaved Optimized Convolution Fprops...\n";
  bool pass = nonFusedConv2d.run(conv2d_s8_sm80_problem_size_0, conv2d_s8_sm80_problem_size_1, cutlass::conv::SplitKMode::kSerial,
    alpha0, beta0, alpha1, beta1);

  if(pass)
    std::cout << "Pass\n";
  else
    std::cout << "Fail\n";

  return pass;
}
/// Fused variant: runs both INT8 interleaved convolutions in a single
/// back-to-back kernel, keeping the intermediate activation resident in
/// registers ("RF residency"). Returns true on success as reported by the
/// test harness.
bool run_fused_conv2d_fprop_optimized_s8_sm80_rf_res() {
  using ElementA           = int8_t;
  using ElementB           = int8_t;
  using ElementC           = int8_t;
  using ElementAccumulator = int32_t;
  using ElementCompute     = float;

  ElementCompute alpha0 = ElementCompute(1);
  //Fused kernel has built-in bias, setting beta=0
  ElementCompute beta0 = ElementCompute(0);
  ElementCompute alpha1 = ElementCompute(1);
  ElementCompute beta1 = ElementCompute(1); //beta=1 for bias

  // Tile shapes for the two stages of the fused kernel.
  using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 64>;
  using WarpShape0 = cutlass::gemm::GemmShape<16, 64, 64>;
  using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 64>;
  using WarpShape1 = cutlass::gemm::GemmShape<16, 128, 64>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;

  // Epilogue of the first stage: alpha scaling + ReLU applied to the
  // intermediate result before it feeds the second stage.
  using EpilogueOutputOp0 =
    cutlass::epilogue::thread::LinearCombinationRelu<
      ElementC,
      8 * InstructionShape::kN / 32,
      ElementAccumulator,
      ElementCompute,
      cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
    >;

  // Epilogue of the second stage: linear combination + ReLU with bias (beta=1).
  using EpilogueOutputOp1 =
    cutlass::epilogue::thread::LinearCombinationRelu<
      ElementC,
      64 / cutlass::sizeof_bits<ElementC>::value,
      ElementAccumulator,
      ElementCompute,
      cutlass::epilogue::thread::ScaleType::NoBetaScaling
    >;

  // Single fused kernel combining both convolutions' tile shapes and epilogues.
  using B2bConv2dFpropKernel = typename cutlass::conv::kernel::DefaultB2bConv2dFprop<
    ElementA, cutlass::layout::TensorNCxHWx<32>,
    ElementB, cutlass::layout::TensorCxRSKx<32>,
    ElementC, cutlass::layout::TensorNCxHWx<32>,
    ElementAccumulator,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    ThreadblockShape0,
    ThreadblockShape1,
    WarpShape0,
    WarpShape1,
    InstructionShape,
    EpilogueOutputOp0,
    EpilogueOutputOp1,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
    3,
    cutlass::arch::OpMultiplyAddSaturate,
    cutlass::conv::IteratorAlgorithm::kOptimized
  >::Kernel;

  using B2bConv2dFprop = cutlass::conv::device::B2bImplicitGemmConvolution<B2bConv2dFpropKernel>;

  // Run through the shared fused-conv test harness (32-element interleave).
  B2bInterleavedFusedConv2dRun<B2bConv2dFprop, 32> fusedConv2d;

  std::cout << "Running Fused back-to-back INT8 interleaved Optimized Convolution Fprops with RF residency...\n";
  bool pass = fusedConv2d.run(conv2d_s8_sm80_problem_size_0, conv2d_s8_sm80_problem_size_1, cutlass::conv::SplitKMode::kSerial,
    alpha0, beta0, alpha1, beta1);

  if(pass)
    std::cout << "Pass\n";
  else
    std::cout << "Fail\n";

  return pass;
}
/// Entry point: registers the non-fused baseline and the fused RF-resident
/// variant, then dispatches both through the shared test harness, which
/// requires an SM80-capable device.
int main() {

  std::vector<bool (*)()> test_functions;
  test_functions.push_back(&run_nonfused_conv2d_fprop_optimized_s8_sm80);
  test_functions.push_back(&run_fused_conv2d_fprop_optimized_s8_sm80_rf_res);

  return testRun(80, test_functions, "conv int8 RF residency");
}
////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/13_two_tensor_op_fusion/fused_two_convs_s8_sm80_rf.cu/0 | {
"file_path": "cutlass/examples/13_two_tensor_op_fusion/fused_two_convs_s8_sm80_rf.cu",
"repo_id": "cutlass",
"token_count": 3356
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "cutlass/transform/warp/vector_fragment_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "kernel/default_b2b_conv2d_fprop.h"
#include "kernel/b2b_implicit_gemm_convolution.h"
#include "threadblock/b2b_implicit_gemm_multistage.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape0,
  typename ThreadblockShape1,
  typename WarpShape0,
  typename WarpShape1,
  typename InstructionShape,
  typename EpilogueOutputOp0,
  typename EpilogueOutputOp1,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag
>
struct DefaultB2bConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape0,
  ThreadblockShape1,
  WarpShape0,
  WarpShape1,
  InstructionShape,
  EpilogueOutputOp0,
  EpilogueOutputOp1,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic
> {

  // Define the core components from GEMM.
  // MmaCore0 / MmaCore1 provide the thread maps, shared-memory iterators, and
  // warp-level MMA policy for the first and second GEMM of the fused pair.
  using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      Stages, MathOperatorTag>;

  using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      Stages, MathOperatorTag>;

  // Define iterators over tiles from the A operand (fprop activations),
  // using the analytic (coordinate-computing) access iterator.
  using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
  using IteratorA0 =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
      ElementA, LayoutA,
      ThreadMapA0
    >;

  using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;

  // Define iterators over tiles from the B operand (fprop filter).
  using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
  using IteratorB0 =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
      ElementB, LayoutB,
      ThreadMapB0
    >;

  using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;

  // Use fragment iterator for A operand: the second GEMM consumes the first
  // GEMM's accumulator fragments (kept in registers) as its A operand,
  // applying EpilogueOutputOp0 on the way.
  using AccumulatorLayout = cutlass::layout::ColumnMajor;
  using FragmentIteratorA1 =
      cutlass::gemm::warp::MmaTensorOpFragmentIterator<
          cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
          cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
          MmaCore1::Shape::kK, //kBlocksColumn
          ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>;

  /// Define iterators over tiles from scale/bias vectors applied between the
  /// two GEMMs.
  using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
  using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
  // Vector access width (elements per load) for the scale/bias iterators.
  static int const kElementsPerAccess = 2;
  using IteratorAccumulatorScaleBias =
    cutlass::transform::threadblock::VectorIterator<
      cutlass::transform::threadblock::PredicatedVectorAccessIterator<
          cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
          cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
          ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
    >;

  // Warp-level iterators to load scale and bias vectors
  using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
      MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
      LayoutScaleBias, InstructionShape, kElementsPerAccess>;

  // Define iterators over tiles from the B operand of the second GEMM.
  using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
  using IteratorB1 =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
      ElementB, LayoutB,
      ThreadMapB1
    >;

  using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
  using MmaPolicy0 = typename MmaCore0::MmaPolicy;
  using MmaPolicy1 = typename MmaCore1::MmaPolicy;

  // Define the Mma: the multistage threadblock-scoped mainloop that fuses the
  // two back-to-back GEMMs.
  using B2bMma = threadblock::B2bImplicitGemmMultistage<
    ThreadblockShape0,
    IteratorA0,
    SmemIteratorA0,
    arch::CacheOperation::Always,
    IteratorB0,
    SmemIteratorB0,
    arch::CacheOperation::Global,
    ThreadblockShape1,
    FragmentIteratorA1,
    IteratorAccumulatorScaleBias,
    FragmentIteratorA1ScaleBias,
    IteratorB1,
    SmemIteratorB1,
    arch::CacheOperation::Global,
    EpilogueOutputOp0,
    MmaPolicy0,
    MmaPolicy1,
    Stages
  >;

  // Define the epilogue applied after the second GEMM (EpilogueOutputOp1).
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape1,
    WarpMmaTensorOp1,
    1,
    EpilogueOutputOp1,
    EpilogueOutputOp1::kCount
  >::Epilogue;

  // Define the kernel that ties the fused mainloop and the epilogue together.
  using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
    B2bMma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline with interleaved layout.
template <
  typename ElementA,
  typename ElementB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape0,
  typename ThreadblockShape1,
  typename WarpShape0,
  typename WarpShape1,
  typename InstructionShape,
  typename EpilogueOutputOp0,
  typename EpilogueOutputOp1,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  int InterleavedK
>
struct DefaultB2bConv2dFprop <
  ElementA,
  layout::TensorNCxHWx<InterleavedK>,
  ElementB,
  layout::TensorCxRSKx<InterleavedK>,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape0,
  ThreadblockShape1,
  WarpShape0,
  WarpShape1,
  InstructionShape,
  EpilogueOutputOp0,
  EpilogueOutputOp1,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic
> {

  // Define the core components from GEMM.  The interleaved global layouts are
  // mapped onto interleaved GEMM layouts; the trailing 'true' selects the
  // interleaved code path in DefaultMmaCore (see its definition for exact
  // semantics).
  using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
      ElementB, layout::RowMajorInterleaved<InterleavedK>,
      ElementAccumulator, LayoutC, arch::OpClassTensorOp,
      Stages, MathOperatorTag, true>;

  using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
      ElementB, layout::RowMajorInterleaved<InterleavedK>,
      ElementAccumulator, LayoutC, arch::OpClassTensorOp,
      Stages, MathOperatorTag, true>;

  // Define iterators over tiles from the A operand
  // Note GEMM shared memory threadmap is used here because conv global memory
  // layout needs to be mapped to fprop which is similar to the crosswise
  // layout which is used by the interleaved GEMM shared memory threadmap.
  // The Interleaved GEMM global memory layout is similar to the congruous
  // layout.
  using ThreadMapA0 = typename MmaCore0::SmemThreadMapA;
  using IteratorA0 =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
      ElementA, layout::TensorNCxHWx<InterleavedK>,
      ThreadMapA0
    >;

  using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;

  // Define iterators over tiles from the B operand
  // Note GEMM shared memory threadmap is used here because conv global memory
  // layout needs to be mapped to fprop which is similar to the crosswise
  // layout which is used by the interleaved GEMM shared memory threadmap.
  // The Interleaved GEMM global memory layout is similar to the congruous
  // layout.
  using ThreadMapB0 = typename MmaCore0::SmemThreadMapB;
  using IteratorB0 =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
      ElementB, layout::TensorCxRSKx<InterleavedK>,
      ThreadMapB0
    >;

  using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;

  // Use fragment iterator for A operand: the second GEMM reads the first
  // GEMM's register-resident accumulators as its A operand, applying
  // EpilogueOutputOp0 on the way.
  using AccumulatorLayout = cutlass::layout::RowMajor;
  using FragmentIteratorA1 =
      cutlass::gemm::warp::MmaTensorOpFragmentIterator<
          cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
          cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
          MmaCore1::Shape::kK, //kBlocksColumn
          ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>;

  /// Define iterators over tiles from scale/bias vectors applied between the
  /// two GEMMs.
  using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
  using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
  // Vector access width (elements per load) for the scale/bias iterators.
  static int const kElementsPerAccess = 4;
  using IteratorAccumulatorScaleBias =
    cutlass::transform::threadblock::VectorIterator<
      cutlass::transform::threadblock::PredicatedVectorAccessIterator<
          cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
          cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
          ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
    >;

  // Warp-level iterators to load scale and bias vectors
  using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
      MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
      LayoutScaleBias, InstructionShape, kElementsPerAccess>;

  // Define iterators over tiles from the B operand of the second GEMM
  // (shared-memory threadmap used for the same layout-mapping reason as B0).
  using ThreadMapB1 = typename MmaCore1::SmemThreadMapB;
  using IteratorB1 =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
      ElementB, layout::TensorCxRSKx<InterleavedK>,
      ThreadMapB1
    >;

  using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
  using MmaPolicy0 = typename MmaCore0::MmaPolicy;
  using MmaPolicy1 = typename MmaCore1::MmaPolicy;

  // Define the Mma: the multistage threadblock-scoped mainloop that fuses the
  // two back-to-back GEMMs.
  using B2bMma = threadblock::B2bImplicitGemmMultistage<
    ThreadblockShape0,
    IteratorA0,
    SmemIteratorA0,
    arch::CacheOperation::Always,
    IteratorB0,
    SmemIteratorB0,
    arch::CacheOperation::Global,
    ThreadblockShape1,
    FragmentIteratorA1,
    IteratorAccumulatorScaleBias,
    FragmentIteratorA1ScaleBias,
    IteratorB1,
    SmemIteratorB1,
    arch::CacheOperation::Global,
    EpilogueOutputOp0,
    MmaPolicy0,
    MmaPolicy1,
    Stages
  >;

  // Define the epilogue: interleaved-layout variant, applied after the second
  // GEMM (EpilogueOutputOp1).
  using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
    ThreadblockShape1,
    WarpMmaTensorOp1,
    1,
    EpilogueOutputOp1,
    EpilogueOutputOp1::kCount,
    InterleavedK
  >::Epilogue;

  // Define the kernel that ties the fused mainloop and the epilogue together.
  using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
    B2bMma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape0,
  typename ThreadblockShape1,
  typename WarpShape0,
  typename WarpShape1,
  typename InstructionShape,
  typename EpilogueOutputOp0,
  typename EpilogueOutputOp1,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag
>
struct DefaultB2bConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape0,
  ThreadblockShape1,
  WarpShape0,
  WarpShape1,
  InstructionShape,
  EpilogueOutputOp0,
  EpilogueOutputOp1,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized
> {

  // Define the core components from GEMM.
  // MmaCore0 / MmaCore1 provide the thread maps, shared-memory iterators, and
  // warp-level MMA policy for the first and second GEMM of the fused pair.
  using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      Stages, MathOperatorTag>;

  using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      Stages, MathOperatorTag>;

  // Define iterators over tiles from the A operand (fprop activations),
  // using the optimized (precomputed-delta) access iterator.
  using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
  using IteratorA0 =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
      ElementA, LayoutA,
      ThreadMapA0
    >;

  using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;

  // Define iterators over tiles from the B operand (fprop filter).
  using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
  using IteratorB0 =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
      ElementB, LayoutB,
      ThreadMapB0
    >;

  using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;

  // Use fragment iterator for A operand: the second GEMM consumes the first
  // GEMM's register-resident accumulators as its A operand, applying
  // EpilogueOutputOp0 on the way.
  using AccumulatorLayout = cutlass::layout::ColumnMajor;
  using FragmentIteratorA1 =
      cutlass::gemm::warp::MmaTensorOpFragmentIterator<
          cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
          cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
          MmaCore1::Shape::kK, //kBlocksColumn
          ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>;

  /// Define iterators over tiles from scale/bias vectors applied between the
  /// two GEMMs.
  using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
  using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
  // Vector access width (elements per load) for the scale/bias iterators.
  static int const kElementsPerAccess = 2;
  using IteratorAccumulatorScaleBias =
    cutlass::transform::threadblock::VectorIterator<
      cutlass::transform::threadblock::PredicatedVectorAccessIterator<
          cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
          cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
          ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
    >;

  // Warp-level iterators to load scale and bias vectors
  using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
      MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
      LayoutScaleBias, InstructionShape, kElementsPerAccess>;

  // Define iterators over tiles from the B operand of the second GEMM.
  using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
  using IteratorB1 =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
      ElementB, LayoutB,
      ThreadMapB1
    >;

  using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
  using MmaPolicy0 = typename MmaCore0::MmaPolicy;
  using MmaPolicy1 = typename MmaCore1::MmaPolicy;

  // Define the Mma: the multistage threadblock-scoped mainloop that fuses the
  // two back-to-back GEMMs.
  using B2bMma = threadblock::B2bImplicitGemmMultistage<
    ThreadblockShape0,
    IteratorA0,
    SmemIteratorA0,
    arch::CacheOperation::Always,
    IteratorB0,
    SmemIteratorB0,
    arch::CacheOperation::Global,
    ThreadblockShape1,
    FragmentIteratorA1,
    IteratorAccumulatorScaleBias,
    FragmentIteratorA1ScaleBias,
    IteratorB1,
    SmemIteratorB1,
    arch::CacheOperation::Global,
    EpilogueOutputOp0,
    MmaPolicy0,
    MmaPolicy1,
    Stages
  >;

  // Define the epilogue applied after the second GEMM (EpilogueOutputOp1).
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape1,
    WarpMmaTensorOp1,
    1,
    EpilogueOutputOp1,
    EpilogueOutputOp1::kCount
  >::Epilogue;

  // Define the kernel that ties the fused mainloop and the epilogue together.
  using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
    B2bMma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline with interleaved layout.
template <
  typename ElementA,
  typename ElementB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape0,
  typename ThreadblockShape1,
  typename WarpShape0,
  typename WarpShape1,
  typename InstructionShape,
  typename EpilogueOutputOp0,
  typename EpilogueOutputOp1,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  int InterleavedK
>
struct DefaultB2bConv2dFprop <
  ElementA,
  layout::TensorNCxHWx<InterleavedK>,
  ElementB,
  layout::TensorCxRSKx<InterleavedK>,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape0,
  ThreadblockShape1,
  WarpShape0,
  WarpShape1,
  InstructionShape,
  EpilogueOutputOp0,
  EpilogueOutputOp1,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized
> {

  // Define the core components from GEMM.  The interleaved global layouts are
  // mapped onto interleaved GEMM layouts; the trailing 'true' selects the
  // interleaved code path in DefaultMmaCore (see its definition for exact
  // semantics).
  using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
      ElementB, layout::RowMajorInterleaved<InterleavedK>,
      ElementAccumulator, LayoutC, arch::OpClassTensorOp,
      Stages, MathOperatorTag, true>;

  using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
      ElementB, layout::RowMajorInterleaved<InterleavedK>,
      ElementAccumulator, LayoutC, arch::OpClassTensorOp,
      Stages, MathOperatorTag, true>;

  // Define iterators over tiles from the A operand
  // Note GEMM shared memory threadmap is used here because conv global memory
  // layout needs to be mapped to fprop which is similar to the crosswise
  // layout which is used by the interleaved GEMM shared memory threadmap.
  // The Interleaved GEMM global memory layout is similar to the congruous
  // layout.
  using ThreadMapA0 = typename MmaCore0::SmemThreadMapA;
  using IteratorA0 =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
      ElementA, layout::TensorNCxHWx<InterleavedK>,
      ThreadMapA0
    >;

  using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;

  // Define iterators over tiles from the B operand
  // Note GEMM shared memory threadmap is used here because conv global memory
  // layout needs to be mapped to fprop which is similar to the crosswise
  // layout which is used by the interleaved GEMM shared memory threadmap.
  // The Interleaved GEMM global memory layout is similar to the congruous
  // layout.
  using ThreadMapB0 = typename MmaCore0::SmemThreadMapB;
  using IteratorB0 =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
      ElementB, layout::TensorCxRSKx<InterleavedK>,
      ThreadMapB0
    >;

  using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;

  // Use fragment iterator for A operand: the second GEMM reads the first
  // GEMM's register-resident accumulators as its A operand, applying
  // EpilogueOutputOp0 on the way.
  using AccumulatorLayout = cutlass::layout::RowMajor;
  using FragmentIteratorA1 =
      cutlass::gemm::warp::MmaTensorOpFragmentIterator<
          cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
          cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
          MmaCore1::Shape::kK, //kBlocksColumn
          ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>;

  /// Define iterators over tiles from scale/bias vectors applied between the
  /// two GEMMs.
  using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
  using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
  // Vector access width (elements per load) for the scale/bias iterators.
  static int const kElementsPerAccess = 4;
  using IteratorAccumulatorScaleBias =
    cutlass::transform::threadblock::VectorIterator<
      cutlass::transform::threadblock::PredicatedVectorAccessIterator<
          cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
          cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
          ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
    >;

  // Warp-level iterators to load scale and bias vectors
  using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
      MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
      LayoutScaleBias, InstructionShape, kElementsPerAccess>;

  // Define iterators over tiles from the B operand of the second GEMM
  // (shared-memory threadmap used for the same layout-mapping reason as B0).
  using ThreadMapB1 = typename MmaCore1::SmemThreadMapB;
  using IteratorB1 =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
      ElementB, layout::TensorCxRSKx<InterleavedK>,
      ThreadMapB1
    >;

  using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
  using MmaPolicy0 = typename MmaCore0::MmaPolicy;
  using MmaPolicy1 = typename MmaCore1::MmaPolicy;

  // Define the Mma: the multistage threadblock-scoped mainloop that fuses the
  // two back-to-back GEMMs.
  using B2bMma = threadblock::B2bImplicitGemmMultistage<
    ThreadblockShape0,
    IteratorA0,
    SmemIteratorA0,
    arch::CacheOperation::Always,
    IteratorB0,
    SmemIteratorB0,
    arch::CacheOperation::Global,
    ThreadblockShape1,
    FragmentIteratorA1,
    IteratorAccumulatorScaleBias,
    FragmentIteratorA1ScaleBias,
    IteratorB1,
    SmemIteratorB1,
    arch::CacheOperation::Global,
    EpilogueOutputOp0,
    MmaPolicy0,
    MmaPolicy1,
    Stages
  >;

  // Define the epilogue: interleaved-layout variant, applied after the second
  // GEMM (EpilogueOutputOp1).
  using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
    ThreadblockShape1,
    WarpMmaTensorOp1,
    1,
    EpilogueOutputOp1,
    EpilogueOutputOp1::kCount,
    InterleavedK
  >::Epilogue;

  // Define the kernel that ties the fused mainloop and the epilogue together.
  using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
    B2bMma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_sm80.h/0 | {
"file_path": "cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_sm80.h",
"repo_id": "cutlass",
"token_count": 9245
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped Back-to-back fused GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "threadblock/b2b_mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA0_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA0_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB0_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB0_,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Iterates over the intermediate accumulator tile
// (concept::MmaTensorOpFragmentIterator)
typename FragmentIteratorA1_,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: VectorIterator)
typename IteratorAccumulatorScaleBias_,
/// FragmentIterator to load Scale or Bias vector from threadblock fragment
typename FragmentIteratorA1ScaleBias_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB1_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB1_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...)
typename OutputOp_,
/// Policy describing tuning details (concept: MmaPipelinedPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPipelinedPolicy)
typename Policy1_,
/// Transformation applied to A0 operand
typename TransformA0_ = NumericArrayConverter<
typename SmemIteratorA0_::Element,
typename IteratorA0_::Element,
IteratorA0_::Fragment::kElements>,
///
/// Transformation applied to B0 operand
typename TransformB0_ = NumericArrayConverter<
typename SmemIteratorB0_::Element,
typename IteratorB0_::Element,
IteratorB0_::Fragment::kElements>,
///
/// Transformation applied to B1 operand
typename TransformB1_ = NumericArrayConverter<
typename SmemIteratorB1_::Element,
typename IteratorB1_::Element,
IteratorB1_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class B2bMmaPipelined :
public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, 2> {
public:
///< Base class
using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, 2>;
using Shape0 = Shape0_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA0 = IteratorA0_; ///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA0;
using IteratorB0 = IteratorB0_; ///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB0;
using Policy0 = Policy0_; ///< Policy describing tuning details
using SmemIteratorA0 = SmemIteratorA0_;
using SmemIteratorB0 = SmemIteratorB0_;
using Shape1 = Shape1_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using FragmentIteratorA1 = FragmentIteratorA1_; ///< Iterates over intermediate accumulator tile
using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_; ///< Iterates over tiles of the scale and bias vectors in global memory
using FragmentIteratorA1ScaleBias =
FragmentIteratorA1ScaleBias_; ///< WarpIterator to load Scale or Bias vector from the threadblock fragment
using IteratorB1 = IteratorB1_; ///< Iterates over tiles of B operand in global memory
using Policy1 = Policy1_; ///< Policy describing tuning details
using Policy = Policy1; ///< Export Policy1 as the threadblock-level Mma's policy
using Shape = Shape1;
using SmemIteratorB1 = SmemIteratorB1_;
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using OutputOp = OutputOp_; ///< Epilogue after 1st Gemm
static const bool PerChannelScale = (OutputOp::kScale ==
epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling);
using TransformA0 = TransformA0_;
using TransformB0 = TransformB0_;
using TransformB1 = TransformB1_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA0 = typename IteratorA0::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB0 = typename IteratorB0::Fragment;
/// Fragment of accumulator tile
using FragmentC0 = typename Policy0::Operator::FragmentC;
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
/// Fragment of Scale and Bias loaded from global memory
using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB1 = typename IteratorB1::Fragment;
/// Fragment of accumulator tile
using FragmentC1 = typename Policy1::Operator::FragmentC;
/// Warp-level Mma
using Operator1 = typename Policy1::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy0::Operator::ArchTag;
/// Complex transform on A0 operand
static ComplexTransform const kTransformA0 = Operator0::kTransformA;
/// Complex transform on B0 operand
static ComplexTransform const kTransformB0 = Operator0::kTransformB;
/// Complex transform on B1 operand
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
/// Complex transform exports needed by higher-level kernels
static ComplexTransform const kTransformA = kTransformA0;
static ComplexTransform const kTransformB = kTransformB0;
/// statically assert kStages for MmaPipelined is two (Double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
private:
// Warp-level fragment types for the first GEMM's operands, loaded from shared memory
using WarpFragmentA0 = typename Operator0::FragmentA;
using WarpFragmentB0 = typename Operator0::FragmentB;
/// Warp Fragment of operand A1 loaded from accumulator tile
using WarpFragmentA1 = typename FragmentIteratorA1::Fragment;
/// Warp Fragment of operand A1 scale and bias loaded from threadblock fragment
using WarpFragmentA1ScaleBias =
typename FragmentIteratorA1ScaleBias::Fragment;
// Warp-level fragment type for the second GEMM's B operand
using WarpFragmentB1 = typename Operator1::FragmentB;
protected:
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA0 smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B0 operand to shared memory
SmemIteratorB0 smem_iterator_B0_;
/// Iterator to write threadblock-scoped tile of B1 operand to shared memory
SmemIteratorB1 smem_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaPipelined(
  typename Base::B2bMmaSharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
  int thread_idx,                                     ///< ID within the threadblock
  int warp_idx,                                       ///< ID of warp
  int lane_idx,                                       ///< ID of each thread within a warp
  int problem_size_0_n                                ///< GEMM0 N is used for accumulator extent
):
  Base(shared_storage, thread_idx, warp_idx, lane_idx),
  smem_iterator_A_(shared_storage.shared_storage0.operand_A_ref(), thread_idx),
  smem_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), thread_idx),
  smem_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), thread_idx) {

  // Decompose warp_idx into (m, n, k) coordinates within the threadblock tile.
  // The decomposition is the same for both GEMMs of the fused pipeline.
  int const warps_mn = Base::WarpCount0::kM * Base::WarpCount0::kN;
  int const warp_k   = warp_idx / warps_mn;
  int const warp_mn  = warp_idx % warps_mn;
  int const warp_m   = warp_mn % Base::WarpCount0::kM;
  int const warp_n   = warp_mn / Base::WarpCount0::kM;

  // The K offsets differ per GEMM because each GEMM may run a different
  // number of warp-level iterations along K.
  int const k_tile_offset_0 = warp_k * Base::kWarpGemmIterations0;
  int const k_tile_offset_1 = warp_k * Base::kWarpGemmIterations1;

  // Position every warp-level iterator at its warp's tile in units of warp-level tiles.
  this->warp_tile_iterator_A0_.add_tile_offset({warp_m, k_tile_offset_0});
  this->warp_tile_iterator_B0_.add_tile_offset({k_tile_offset_0, warp_n});
  this->warp_tile_iterator_B1_.add_tile_offset({k_tile_offset_1, warp_n});
}
/// Perform a threadblock-scoped matrix multiply-accumulate.
///
/// Runs the fused back-to-back GEMM: first GEMM0 (A0 x B0 -> accum0) with a
/// double-buffered (2-stage) software pipeline, then GEMM1 which consumes
/// accum0 (through FragmentIteratorA1, applying output_op_0 plus optional
/// per-channel scale/bias) as its A operand against B1.
CUTLASS_DEVICE
void operator()(
  int gemm_k_iterations_0,                         ///< number of iterations of the mainloop
  FragmentC1 &accum,                               ///< destination accumulator tile
  IteratorA0 iterator_A,                           ///< iterator over A operand in global memory
  IteratorB0 iterator_B0,                          ///< iterator over B0 operand in global memory
  IteratorAccumulatorScaleBias iterator_A1_scale,  ///< iterator over A1 operand scale vectors in global memory
  IteratorAccumulatorScaleBias iterator_A1_bias,   ///< iterator over A1 operand bias vectors in global memory
  IteratorB1 iterator_B1,                          ///< iterator over B1 operand in global memory
  FragmentC0 const &src_accum,                     ///< source accumulator tile
  OutputOp output_op_0,                            ///< epilogue operation after 1st Gemm
  TransformA0 transform_A0 = TransformA0(),        ///< transformation applied to A0 fragment
  TransformB0 transform_B0 = TransformB0(),        ///< transformation applied to B0 fragment
  TransformB1 transform_B1 = TransformB1()) {      ///< transformation applied to B1 fragment

  //
  // Prologue
  //

  // Perform accumulation in the 'd' output operand
  FragmentC0 accum0 = src_accum;

  FragmentA0 tb_frag_A;
  FragmentB0 tb_frag_B0;

  tb_frag_A.clear();
  tb_frag_B0.clear();

  // The last kblock is loaded in the prolog
  iterator_A.load(tb_frag_A);
  iterator_B0.load(tb_frag_B0);

  ++iterator_A;
  ++iterator_B0;

  // Stage the first k-block into shared memory before entering the mainloop.
  this->smem_iterator_A_.store(transform_A0(tb_frag_A));
  this->smem_iterator_B0_.store(transform_B0(tb_frag_B0));

  ++this->smem_iterator_A_;
  ++this->smem_iterator_B0_;

  __syncthreads();

  // Pair of fragments used to overlap shared memory loads and math instructions
  WarpFragmentA0 warp_frag_A0[2];
  WarpFragmentB0 warp_frag_B0[2];

  this->warp_tile_iterator_A0_.set_kgroup_index(0);
  this->warp_tile_iterator_B0_.set_kgroup_index(0);

  this->warp_tile_iterator_A0_.load(warp_frag_A0[0]);
  this->warp_tile_iterator_B0_.load(warp_frag_B0[0]);

  ++this->warp_tile_iterator_A0_;
  ++this->warp_tile_iterator_B0_;

  Operator0 warp_mma0;

  // Tracks which of the two shared-memory stages is written next (toggles 0/1).
  int smem_write_stage_idx = 1;

  // Avoid reading out of bounds
  iterator_A.clear_mask(gemm_k_iterations_0 <= 1);
  iterator_B0.clear_mask(gemm_k_iterations_0 <= 1);

  // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
  // shared memory loads (which have the tightest latency requirement).

  //
  // Mainloop (GEMM0)
  //

  // Note: The main loop does not support Base::kWarpGemmIterations == 2.
  CUTLASS_GEMM_LOOP
  for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {
    //
    // Loop over GEMM K dimension
    //
    CUTLASS_PRAGMA_UNROLL
    for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {

      // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
      // as the case may be.
      if (warp_mma_k == Base::kWarpGemmIterations0 - 1) {

        // Write fragments to shared memory
        this->smem_iterator_A_.store(transform_A0(tb_frag_A));
        this->smem_iterator_B0_.store(transform_B0(tb_frag_B0));

        __syncthreads();

        ++this->smem_iterator_A_;
        ++this->smem_iterator_B0_;

        // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
        if (smem_write_stage_idx == 1) {
          this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
          this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0});
        }
        else {
          this->warp_tile_iterator_A0_.add_tile_offset(
              {0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0});
          this->warp_tile_iterator_B0_.add_tile_offset(
              {-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0,
               0});
        }

        smem_write_stage_idx ^= 1;
      }

      // Prefetch the next warp-level fragments into the other half of the double buffer.
      this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
      this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);

      this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]);
      this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]);

      ++this->warp_tile_iterator_A0_;
      ++this->warp_tile_iterator_B0_;

      if (warp_mma_k == 0) {

        // Issue the next global-memory loads early so they overlap with math.
        iterator_A.load(tb_frag_A);
        iterator_B0.load(tb_frag_B0);

        ++iterator_A;
        ++iterator_B0;

        // Avoid reading out of bounds if this was the last loop iteration
        iterator_A.clear_mask(gemm_k_iterations_0 <= 2);
        iterator_B0.clear_mask(gemm_k_iterations_0 <= 2);
      }

      warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2],
                warp_frag_B0[warp_mma_k % 2], accum0);
    }
  }

  //2nd Gemm

  /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile
  FragmentIteratorA1 warp_tile_iterator_A1_(accum0);

  //
  // Prologue
  //
  FragmentA1ScaleBias tb_frag_A1_scale;
  FragmentA1ScaleBias tb_frag_A1_bias;
  FragmentIteratorA1ScaleBias warp_tile_iterator_A1_scale_(tb_frag_A1_scale);
  FragmentIteratorA1ScaleBias warp_tile_iterator_A1_bias_(tb_frag_A1_bias);
  FragmentB1 tb_frag_B1;

  // NOTE: only the scale clear is conditional on PerChannelScale (no braces);
  // the bias clear below always executes.
  if(PerChannelScale)
    tb_frag_A1_scale.clear();
  tb_frag_A1_bias.clear();
  tb_frag_B1.clear();

  // The last kblock is loaded in the prolog
  if(PerChannelScale)
    iterator_A1_scale.load(tb_frag_A1_scale);
  iterator_A1_bias.load(tb_frag_A1_bias);
  iterator_B1.load(tb_frag_B1);

  if(PerChannelScale)
    ++iterator_A1_scale;
  ++iterator_A1_bias;
  ++iterator_B1;

  this->smem_iterator_B1_.store(transform_B1(tb_frag_B1));

  ++this->smem_iterator_B1_;

  __syncthreads();

  // Pair of fragments used to overlap shared memory loads and math instructions
  WarpFragmentA1ScaleBias warp_frag_A1_scale[2];
  WarpFragmentA1ScaleBias warp_frag_A1_bias[2];
  WarpFragmentA1 warp_frag_A1[2];
  WarpFragmentB1 warp_frag_B1[2];

  this->warp_tile_iterator_B1_.set_kgroup_index(0);

  if(PerChannelScale)
    warp_tile_iterator_A1_scale_.load(warp_frag_A1_scale[0]);
  warp_tile_iterator_A1_bias_.load(warp_frag_A1_bias[0]);
  // Loading A1 applies output_op_0 and scale/bias to the GEMM0 accumulators.
  warp_tile_iterator_A1_.load(warp_frag_A1[0], warp_frag_A1_scale[0],
                              warp_frag_A1_bias[0], output_op_0);
  this->warp_tile_iterator_B1_.load(warp_frag_B1[0]);

  ++warp_tile_iterator_A1_;
  if(PerChannelScale)
    ++warp_tile_iterator_A1_scale_;
  ++warp_tile_iterator_A1_bias_;
  ++this->warp_tile_iterator_B1_;

  Operator1 warp_mma1;

  smem_write_stage_idx = 1;

  // Number of mainloop iterations for GEMM1, derived from the accumulator extent.
  int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1;

  // Avoid reading out of bounds
  iterator_B1.clear_mask(gemm_k_iterations_1 <= 1);

  //
  // Mainloop (GEMM1)
  //

  // Note: The main loop does not support Base::WarpGemmIterations == 2.
  CUTLASS_PRAGMA_UNROLL
  for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) {
    //
    // Loop over GEMM K dimension
    //
    CUTLASS_PRAGMA_UNROLL
    for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) {

      // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
      // as the case may be.
      if (warp_mma_k == Base::kWarpGemmIterations1 - 1) {

        // Write fragments to shared memory
        this->smem_iterator_B1_.store(transform_B1(tb_frag_B1));

        __syncthreads();

        ++this->smem_iterator_B1_;

        // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
        if (smem_write_stage_idx == 1) {
          this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
        }
        else {
          this->warp_tile_iterator_B1_.add_tile_offset(
              {-Base::kStages * Policy1::kPartitionsK *
                   Base::kWarpGemmIterations1,
               0});
        }

        smem_write_stage_idx ^= 1;

        // Refill the scale/bias threadblock fragments for the next k-group.
        if(PerChannelScale) {
          tb_frag_A1_scale.clear();
          iterator_A1_scale.load(tb_frag_A1_scale);
          ++iterator_A1_scale;
        }
        tb_frag_A1_bias.clear();
        iterator_A1_bias.load(tb_frag_A1_bias);
        ++iterator_A1_bias;
      }

      this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);

      if(PerChannelScale)
        warp_tile_iterator_A1_scale_.load(warp_frag_A1_scale[(warp_mma_k + 1) % 2]);
      warp_tile_iterator_A1_bias_.load(warp_frag_A1_bias[(warp_mma_k + 1) % 2]);
      warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2],
                                  warp_frag_A1_scale[(warp_mma_k + 1) % 2],
                                  warp_frag_A1_bias[(warp_mma_k + 1) % 2],
                                  output_op_0);
      this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]);

      if(PerChannelScale)
        ++warp_tile_iterator_A1_scale_;
      ++warp_tile_iterator_A1_bias_;
      ++warp_tile_iterator_A1_;
      ++this->warp_tile_iterator_B1_;

      if (warp_mma_k == 0) {

        iterator_B1.load(tb_frag_B1);

        ++iterator_B1;

        // Avoid reading out of bounds if this was the last loop iteration
        iterator_B1.clear_mask(gemm_k_iterations_1 <= 2);
      }

      warp_mma1(accum, warp_frag_A1[warp_mma_k % 2],
                warp_frag_B1[warp_mma_k % 2], accum);
    }
  }
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined.h/0 | {
"file_path": "cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined.h",
"repo_id": "cutlass",
"token_count": 8674
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to fuse activation's per channel scale+bias+relu
into the wgrad mainloop.
 Compared with the original wgrad kernel, this example has two more vectors, one for
the scale and one for the bias. The length of the vectors are the same as the
activation channel number. This kernels loads the vectors when the associated
activation channels are loaded in the mainloop. Between reading the
activations and scale/bias data from the shared memory and calling tensor core
instructions, scale+bias+relu is computed in the register file.
This example is customized for Ampere 16816 fp16 tensor core instruction.
 Changing to a different data type or a different tensor core instruction requires
 changing the source code. See
include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h for more
technical details.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad_fusion.h"
#include "cutlass/conv/device/implicit_gemm_convolution_fusion.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float;                  // Data type of accumulator
using ElementComputeEpilogue = float;              // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t;             // Data type of elements in input tensor
using ElementInputB = cutlass::half_t;             // Data type of elements in input tensor
using ElementInputScaleBias = cutlass::half_t;     // Data type of elements in input scale and bias vectors
using ElementOutput = float;                       // Data type of elements in output tensor

using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutInputScaleBias = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::TensorNHWC;

// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;

// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;

// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;  // Threadblock tile shape

// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;           // Warp tile shape

// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;     // TensorCore instruction shape

// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;

// Number of pipelines you want to use
constexpr int NumStages = 5;

// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;

// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
    ElementOutput,                                     // Data type of output matrix.
    128 / cutlass::sizeof_bits<ElementOutput>::value,  // The number of elements per vectorized.
                                                       // memory access. This becomes the vector width of
                                                       // math instructions in the epilogue too.
    ElementAccumulator,                                // Data type of accumulator
    ElementComputeEpilogue>;                           // Data type for alpha/beta in linear combination

// The fused wgrad kernel: scale+bias+relu on the activation operand is applied in the mainloop.
using Conv2dWgradFusionKernel = typename cutlass::conv::kernel::DefaultConv2dWgradFusion<
  ElementInputA, LayoutInputA,
  ElementInputB, LayoutInputB,
  ElementInputScaleBias, LayoutInputScaleBias,
  ElementOutput, LayoutOutput,
  ElementAccumulator,
  MMAOp,
  SmArch,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOp,
  SwizzleThreadBlock,
  NumStages,
  cutlass::arch::OpMultiplyAdd,
  IteratorAlgorithm
>::Kernel;

// Device-level adapter that launches the fused implicit GEMM convolution.
using ImplicitGemmFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<Conv2dWgradFusionKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
/// Command-line options for the wgrad mainloop-fusion example.
struct Options {

  bool help;                          // print usage and exit
  cutlass::Tensor4DCoord input_size;  // activation extent (N, H, W, C)
  cutlass::Tensor4DCoord filter_size; // filter extent (K, R, S, C)
  // Padding is stored as {pad_h, pad_h, pad_w, pad_w}; see output_size() which
  // sums .n()+.h() for H padding and .w()+.c() for W padding.
  cutlass::Tensor4DCoord padding;
  cutlass::MatrixCoord conv_stride;   // (stride_h, stride_w)
  cutlass::MatrixCoord dilation;      // (dilation_h, dilation_w)
  bool reference_check;               // run reference comparison after the kernel
  bool measure_performance;           // time repeated kernel launches
  int iterations;                     // number of profiling iterations
  bool save_workspace;                // dump tensors to a text file
  ElementComputeEpilogue alpha;       // epilogue scalar alpha
  ElementComputeEpilogue beta;        // epilogue scalar beta
  bool benchmark;                     // sweep a set of predefined layers
  std::string tag;                    // label replicated in the results table

  Options():
    help(false),
    input_size(1, 32, 32, 32),
    filter_size(32, 3, 3, 32),
    padding(1, 1, 1, 1),
    conv_stride(1, 1),
    dilation(1, 1),
    reference_check(true),
    measure_performance(false),
    iterations(20),
    save_workspace(false),
    alpha(1),
    beta(0),
    benchmark(false) { }

  // Verify the problem size is compatible with the CUTLASS Convolution implementation.
  bool valid() {

    //
    // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
    // all pointers, strides, and tensor extents must be divisible by 8 elements.
    //
    int const kAlignment = 8;

    if ((input_size.c() % kAlignment) ||
      (filter_size.n() % kAlignment)) {

      // misaligned tensors
      return false;
    }

    // Invalid padding: this example only supports "same"-style padding of filter/2.
    if ((padding.h() != filter_size.h() / 2) ||
      (padding.w() != filter_size.w() / 2)) {

      return false;
    }

    return true;
  }

  /// Updates input and filter sizes (used by the benchmark sweep);
  /// recomputes padding as filter/2 in the {pad_h, pad_h, pad_w, pad_w} convention.
  void update(
    cutlass::Tensor4DCoord input_size,
    cutlass::Tensor4DCoord filter_size,
    cutlass::MatrixCoord stride) {

    this->input_size = input_size;
    this->filter_size = filter_size;
    conv_stride = stride;

    padding.n() = filter_size.h() / 2;
    padding.h() = filter_size.h() / 2;
    padding.w() = filter_size.w() / 2;
    padding.c() = filter_size.w() / 2;
  }

  // Parses the command line
  void parse(int argc, char const **args) {
    cutlass::CommandLine cmd(argc, args);

    if (cmd.check_cmd_line_flag("help")) {
      help = true;
    }

    if (cmd.check_cmd_line_flag("ref-check")) {
      reference_check = true;
    }

    if (cmd.check_cmd_line_flag("perf-check")) {
      measure_performance = true;
    }

    if (cmd.check_cmd_line_flag("save-workspace")) {
      save_workspace = true;
    }

    if (cmd.check_cmd_line_flag("benchmark")) {
      benchmark = true;
    }

    cmd.get_cmd_line_argument("n", input_size.n());
    cmd.get_cmd_line_argument("h", input_size.h());
    cmd.get_cmd_line_argument("w", input_size.w());
    cmd.get_cmd_line_argument("c", input_size.c());

    cmd.get_cmd_line_argument("k", filter_size.n());
    cmd.get_cmd_line_argument("r", filter_size.h());
    cmd.get_cmd_line_argument("s", filter_size.w());
    // Wgrad requires the filter channel count to match the activation channel count.
    filter_size.c() = input_size.c();

    cmd.get_cmd_line_argument("alpha", alpha);
    cmd.get_cmd_line_argument("beta", beta);

    cmd.get_cmd_line_argument("iterations", iterations);
    cmd.get_cmd_line_argument("tag", tag);

    // Only 3x3 (pad 1) and 1x1 (pad 0) filters are supported: any other
    // requested --r/--s is silently forced to 1x1.
    if (filter_size.h() == 3 && filter_size.w() == 3) {
      padding = {1, 1, 1, 1};
    }
    else {
      filter_size.h() = 1;
      filter_size.w() = 1;
      padding = {0, 0, 0, 0};
    }
  }

  /// Prints the usage statement.
  std::ostream & print_usage(std::ostream &out) const {

    out << "26_ampere_wgrad_mainloop_fusion example\n\n"
      << "  This example fuses scale+bias+relu of the activation into Ampere's\n"
      << "  Tensor Core operators on F16 data types to compute\n"
      << "  backward convolution on tensors of layout NHWC.\n\n"
      << "Options:\n\n"
      << "  --help               If specified, displays this usage statement.\n\n"
      << "  --n=<int>            Input tensor extent N\n"
      << "  --h=<int>            Input tensor extent H\n"
      << "  --w=<int>            Input tensor extent W\n"
      << "  --c=<int>            Input tensor extent C\n"
      << "  --k=<int>            Filter extent K\n"
      << "  --r=<int>            Filter extent R\n"
      << "  --s=<int>            Filter extent S\n\n"
      << "  --alpha=<float>      Epilogue scalar alpha\n"
      << "  --beta=<float>       Epilogue scalar beta\n\n"
      << "  --ref-check          If set (true), reference check on the host is computed\n"
      << "  --perf-check         If set (true), performance is measured.\n"
      << "  --benchmark          If set (true), performance benchmarking on several layers and batch-size.\n"
      << "  --iterations=<int>   Number of profiling iterations to perform.\n"
      << "  --save-workspace     If set, workspace is written to a text file.\n"
      << "  --tag=<string>       String to replicate across the first column in the results table\n";

    out << "\n\nExamples:\n\n"
      << "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion  --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
      << "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion  --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";

    return out;
  }

  /// Computes the output tensor size (NPQK)
  cutlass::Tensor4DCoord output_size() const {
    return cutlass::Tensor4DCoord(
      input_size.n(),
      (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
      (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
      filter_size.n());
  }

  /// Compute performance in GFLOP/s
  double gflops(double runtime_s) const {

    // Number of multiply-adds = NPQK * CRS
    int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());

    // Two flops per multiply-add
    return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride.row() << ","
<< options.conv_stride.column() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
/// Runs one benchmark: allocates tensors, launches the fused wgrad kernel,
/// optionally verifies against a reference implementation, optionally times
/// repeated launches, and returns the aggregated Result.
Result profile_convolution(Options const &options) {

  Result result;

  //
  // Allocate host-device tensors using the CUTLASS Utilities.
  //

  // For wgrad, operand A is the output gradient and is allocated with the
  // output (NPQK) extent; operand B is the activation (NHWC input extent);
  // C/D hold the filter gradient (KRSC extent).
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.output_size());
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.input_size);
  // Holds the host-computed scale+bias+relu-transformed activation used by the reference check.
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_transformed_b(options.input_size);
  cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
      tensor_b_scale({1, options.input_size.c()});
  cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
      tensor_b_bias({1, options.input_size.c()});
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.filter_size);
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.filter_size);
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.filter_size);

  //
  // Initialize tensors
  //

  // Fill tensor A on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_a.host_view(),
      1,
      ElementInputA(3),
      ElementInputA(-4),
      0);

  // Fill tensor B on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b.host_view(),
      1,
      ElementInputB(7),
      ElementInputB(-8),
      0);

  // Fill scale vector for tensor B on host with uniform-distribution random
  // data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b_scale.host_view(),
      1,
      ElementInputA(3),
      ElementInputA(-4),
      0);

  // Fill bias vector for tensor B on host with uniform-distribution random
  // data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b_bias.host_view(),
      1,
      ElementInputA(3),
      ElementInputA(-4),
      0);

  // Fill tensor C on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_c.host_view(),
      1,
      ElementOutput(7),
      ElementOutput(-8),
      0);

  // Fill tensor D on host with zeros
  cutlass::reference::host::TensorFill(
      tensor_d.host_view());

  // Fill tensor D for reference on host with zeros
  cutlass::reference::host::TensorFill(
      tensor_ref_d.host_view());

  // Copy data from host to GPU
  tensor_a.sync_device();
  tensor_b.sync_device();
  tensor_b_scale.sync_device();
  tensor_b_bias.sync_device();
  tensor_c.sync_device();
  tensor_d.sync_device();
  tensor_ref_d.sync_device();

  //
  // Define arguments for CUTLASS Convolution
  //

  cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;

  // Split K dimension into 1 partitions
  int split_k_slices = 1;

  // Construct Conv2dProblemSize with user defined output size
  cutlass::conv::Conv2dProblemSize problem_size(
      options.input_size,
      options.filter_size,
      options.padding,
      options.conv_stride,
      options.dilation,
      options.output_size(),
      mode,
      split_k_slices
  );

  typename ImplicitGemmFusion::Arguments arguments{
    problem_size,
    tensor_a.device_ref(),
    tensor_b.device_ref(),
    tensor_b_scale.device_ref(),
    tensor_b_bias.device_ref(),
    tensor_c.device_ref(),
    tensor_d.device_ref(),
    {options.alpha, options.beta},
  };

  //
  // Initialize CUTLASS Convolution
  //

  ImplicitGemmFusion implicit_gemm_fusion_op;

  size_t workspace_size = implicit_gemm_fusion_op.get_workspace_size(arguments);

  // Allocate workspace memory
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

  result.status = implicit_gemm_fusion_op.can_implement(arguments);
  CUTLASS_CHECK(result.status);

  result.status = implicit_gemm_fusion_op.initialize(arguments, workspace.get());
  CUTLASS_CHECK(result.status);

  //
  // Launch initialized CUTLASS kernel
  //
  result.status = implicit_gemm_fusion_op();

  CUTLASS_CHECK(result.status);

  //
  // Optional reference check
  //

  if (options.reference_check) {
    std::cout << "Verification on device...\n";

    // Compute scale + bias + relu in host code, producing the transformed
    // activation that the fused kernel applies inside its mainloop.
    for (int n = 0; n < options.input_size.n(); ++n) {
      for (int h = 0; h < options.input_size.h(); ++h) {
        for (int w = 0; w < options.input_size.w(); ++w) {
          for (int c = 0; c < options.input_size.c(); ++c) {
            tensor_transformed_b.at({n, h, w, c}) = std::max(
                ElementOutput(0), ElementOutput(tensor_b.at({n, h, w, c}) *
                                                    tensor_b_scale.at({0, c}) +
                                                tensor_b_bias.at({0, c})));
          }
        }
      }
    }

    tensor_transformed_b.sync_device();

    // Compute with reference implementation
    cutlass::reference::device::Conv2dWgrad<
        ElementInputA,
        LayoutInputA,
        ElementInputB,
        LayoutInputB,
        ElementOutput,
        LayoutOutput,
        ElementComputeEpilogue,
        ElementAccumulator,
        cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
    >(
        problem_size,
        tensor_a.device_ref(),
        tensor_transformed_b.device_ref(),
        tensor_c.device_ref(),
        tensor_ref_d.device_ref(),
        options.alpha,
        options.beta
    );

    // Check if output from CUTLASS kernel and reference kernel are equal or not
    tensor_d.sync_host();
    tensor_ref_d.sync_host();

    bool passed = cutlass::reference::host::TensorEquals(
      tensor_d.host_view(),
      tensor_ref_d.host_view());

    if (!passed) {
      result.reference_check = cutlass::Status::kErrorInternal;
      std::cout << "ERROR - results miscompared.\n";
    }
    else {
      result.reference_check = cutlass::Status::kSuccess;
      std::cout << "Passed.\n";
    }
  }
  else {
    result.reference_check = cutlass::Status::kInvalid;
  }

  if (options.save_workspace) {

    std::stringstream ss;

    ss << "26_ampere_wgrad_mainloop_fusion_"
      << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
      << "_"
      << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
      << ".dat";

    std::ofstream output_workspace(ss.str());

    output_workspace
      << "Input = \n" << tensor_a.host_view() << "\n\n"
      << "Filters = \n" << tensor_b.host_view() << "\n\n";

    if (options.reference_check) {
      output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
    }

    output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;

    std::cout << "Results written to '" << ss.str() << "'." << std::endl;
  }

  //
  // Performance measurement
  //

  if (options.measure_performance) {

    cudaEvent_t events[2];

    for (auto & event : events) {
      result.error = cudaEventCreate(&event);
      if (result.error != cudaSuccess) {
        std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
      }
    }

    // Record an event at the start of a series of convolution operations.
    result.error = cudaEventRecord(events[0]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
    }

    // Launch a sequence of implicit GEMM operations on the device
    for (int iteration = 0; iteration < options.iterations; ++iteration) {
      result.status = implicit_gemm_fusion_op();
      CUTLASS_CHECK(result.status);
    }

    // Record an event when the convolutions have been launched.
    result.error = cudaEventRecord(events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
    }

    // Wait for work on the device to complete.
    result.error = cudaEventSynchronize(events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
    }

    // Measure elapsed runtime
    float runtime_ms = 0;
    result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
    }

    // Print average runtime and GFLOPs.
    result.runtime_ms = double(runtime_ms) / double(options.iterations);
    result.gflops = options.gflops(result.runtime_ms / 1000.0);

    // Cleanup
    // NOTE(review): events are only destroyed on this success path; the
    // early 'return result' branches above leak the created cudaEvent_t
    // objects -- confirm whether this is acceptable for an example program.
    for (auto event : events) {
      (void)cudaEventDestroy(event);
    }
  }

  return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point: checks CUDA toolkit/device requirements, parses command-line
/// options, then either sweeps a fixed set of ResNet-style layer shapes
/// (--benchmark) or profiles the single problem size given on the command line.
int main(int argc, char const **args) {
  bool notSupported = false;
  // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
  //
  // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
  if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
    std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
    notSupported = true;
  }
  // Require compute capability 8.0 exactly (SM80 / A100).
  cudaDeviceProp props;
  CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
  if (!(props.major == 8 && props.minor == 0)) {
    std::cerr << "This test must run on SM80 A100.\n";
    notSupported = true;
  }
  // Unsupported configurations exit cleanly (status 0) so CI treats them as skipped.
  if (notSupported) {
    return 0;
  }
  Options options;
  options.parse(argc, args);
  if (options.help) {
    options.print_usage(std::cout) << std::endl;
    return 0;
  }
  if (options.benchmark) {
    // Benchmark several layers
    int batch_sizes[] = {34, 408};
    // Layer shapes: input HxWxC, filter count k, filter extent r x s, and strides.
    struct Benchmark {
      int h, w, c, k, r, s, stride_h, stride_w;
    } layers[] = {
      {56, 56, 64, 256, 1, 1, 1, 1},
      {56, 56, 64, 64, 1, 1, 1, 1},
      {56, 56, 64, 64, 3, 3, 1, 1},
      {56, 56, 256, 64, 1, 1, 1, 1},
      {56, 56, 256, 512, 1, 1, 2, 2},
      {56, 56, 256, 128, 1, 1, 1, 1},
      {56, 56, 128, 128, 3, 3, 2, 2},
      {28, 28, 128, 512, 1, 1, 1, 1},
      {28, 28, 512, 128, 1, 1, 1, 1},
      {28, 28, 128, 128, 3, 3, 1, 1},
      {28, 28, 512, 1024, 1, 1, 2, 2},
      {28, 28, 512, 256, 1, 1, 1, 1},
      {28, 28, 256, 256, 3, 3, 2, 2},
      {14, 14, 256, 1024, 1, 1, 1, 1},
      {14, 14, 1024, 256, 1, 1, 1, 1},
      {14, 14, 256, 256, 3, 3, 1, 1},
      {14, 14, 1024, 2048, 1, 1, 2, 2},
      {14, 14, 1024, 512, 1, 1, 1, 1},
      {14, 14, 512, 512, 3, 3, 2, 2},
      { 7, 7, 512, 2048, 1, 1, 1, 1},
      { 7, 7, 2048, 512, 1, 1, 1, 1},
      { 7, 7, 512, 512, 3, 3, 1, 1},
    };
    Result::print_header(std::cout, options) << std::endl;
    int idx = 1;
    for (auto const &layer : layers) {
      for (auto N : batch_sizes) {
        options.update({N, layer.h, layer.w, layer.c},
          {layer.k, layer.r, layer.s, layer.c},
          {layer.stride_h, layer.stride_w});
        Result result = profile_convolution(options);
        result.print(std::cout, idx, options) << std::endl;
      }
      // NOTE(review): idx advances per layer, so both batch sizes of a layer
      // share one index — appears to be a layer index; confirm if a per-run
      // index was intended.
      ++idx;
    }
  }
  else {
    // Execute one problem size
    if (!options.valid()) {
      std::cerr << "Invalid problem." << std::endl;
      return -1;
    }
    Result result = profile_convolution(options);
    Result::print_header(std::cout, options) << std::endl;
    result.print(std::cout, 1, options) << std::endl;
  }
  return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu/0 | {
"file_path": "cutlass/examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu",
"repo_id": "cutlass",
"token_count": 9921
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Contains additional metadata about layout permute functions used in the example.
*/
#include "cutlass/tensor_coord.h"
#include "cutlass/layout/permute.h"
/// Additional permutation metadata to facilitate testing/printing
/// Primary template; one explicit specialization exists per permute layout tag.
template<typename PermuteLayout>
struct PermuteInfo;
/// Specialization for default case (no permute). Other specializations must follow this template.
template<>
struct PermuteInfo<cutlass::layout::NoPermute> {
  /// Whether this is a BMM or GEMM permutation (NoPermute can actually be either)
  static bool constexpr kBatched = false;
  /// Minimal divisor for row extent
  static int constexpr kRowFactor = 1;
  /// Minimum divisor for column extent
  static int constexpr kColumnFactor = 1;
  /// Minimum divisor for batch size dimension
  static int constexpr kBatchFactor = 1;
  /// Tensor layout used in permutation operation
  using Layout = cutlass::layout::PackedVectorLayout;
  /// Short identifier used in test/profiling output.
  static std::string name() {
    return "NoPermute";
  }
  /// User-friendly description of the permute operation
  static std::string desc() {
    return "no permutation";
  }
  /// Infer original higher-rank tensor shape from GEMM/BMM matrix extents.
  /// For direct (output) permutations, must be a simple reshape of extent.
  /// For inverse (input) permutations, must return shape *before* permute operation.
  /// In case of NoPermute, simply use a linear (rank 1) view of the memory
  static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    return Layout::TensorCoord(extent.row() * extent.column() * batch_count);
  }
  /// Compute the permuted higher-rank tensor shape from the original shape.
  static Layout::TensorCoord permute(Layout::TensorCoord const &s) {
    return s;
  }
};
/// Direct (output) permutation [0, 2, 1, 3] for a batched GEMM whose batch
/// dimension is viewed as D0 * D1, giving a 4-D tensor [D0, D1, D2, D3].
template<int D1>
struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>> {
  static bool constexpr kBatched = true;
  static int constexpr kRowFactor = 1;
  static int constexpr kColumnFactor = 1;
  /// Batch count must be divisible by D1 so it can be split as D0 * D1.
  static int constexpr kBatchFactor = D1;
  using Layout = cutlass::layout::TensorNHWC;
  static std::string name() {
    return "Tensor4DPermuteBMM0213<" + std::to_string(D1) + ">";
  }
  static std::string desc() {
    return "batched GEMM permutation [0, 2, 1, 3]";
  }
  /// View [batch, rows, cols] as a 4-D tensor [D0, D1, D2, D3].
  static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    int D0 = batch_count / D1;
    int D2 = extent.row();
    int D3 = extent.column();
    return {D0, D1, D2, D3};
  }
  /// Apply the [0, 2, 1, 3] axis permutation to a 4-D shape.
  static Layout::TensorCoord permute(Layout::TensorCoord const &s) {
    return {s[0], s[2], s[1], s[3]};
  }
};
/// Inverse (input) counterpart of Tensor4DPermuteBMM0213RowMajor: here the D1
/// factor is folded into the column extent, which therefore must divide by D1.
template<int D1>
struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajorInverse<D1>>
: public PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>> {
  static bool constexpr kBatched = true;
  static int constexpr kRowFactor = 1;
  static int constexpr kColumnFactor = D1;
  static int constexpr kBatchFactor = 1;
  using Base = PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>>;
  using Layout = typename Base::Layout;
  /// Shape *before* the permutation was applied (inverse mapping).
  static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    int D0 = batch_count;
    int D2 = extent.row();
    int D3 = extent.column() / D1;
    return {D0, D1, D2, D3};
  }
};
/// Direct (output) permutation [0, 3, 2, 1] for a batched GEMM with the batch
/// dimension split as D0 * D1 (column-major variant).
template<int D1>
struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>> {
  static bool constexpr kBatched = true;
  static int constexpr kRowFactor = 1;
  static int constexpr kColumnFactor = 1;
  /// Batch count must be divisible by D1 so it can be split as D0 * D1.
  static int constexpr kBatchFactor = D1;
  using Layout = cutlass::layout::TensorNHCW;
  static std::string name() {
    return "Tensor4DPermuteBMM0321<" + std::to_string(D1) + ">";
  }
  static std::string desc() {
    return "batched GEMM permutation [0, 3, 2, 1]";
  }
  /// View [batch, rows, cols] as a 4-D tensor [D0, D1, D2, D3].
  static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    int D0 = batch_count / D1;
    int D2 = extent.row();
    int D3 = extent.column();
    return {D0, D1, D2, D3};
  }
  /// Apply the [0, 3, 2, 1] axis permutation to a 4-D shape.
  static Layout::TensorCoord permute(Layout::TensorCoord const &s) {
    return {s[0], s[3], s[2], s[1]};
  }
};
/// Inverse (input) counterpart of Tensor4DPermuteBMM0321ColumnMajor: the D1
/// factor is folded into the row extent, which therefore must divide by D1.
template<int D1>
struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajorInverse<D1>>
: public PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>> {
  static bool constexpr kBatched = true;
  static int constexpr kRowFactor = D1;
  static int constexpr kColumnFactor = 1;
  static int constexpr kBatchFactor = 1;
  using Base = PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>>;
  using Layout = typename Base::Layout;
  /// Shape *before* the permutation was applied (inverse mapping).
  static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    int D0 = batch_count;
    int D2 = extent.row() / D1;
    int D3 = extent.column();
    return {D0, D1, D2, D3};
  }
};
/// Direct (output) permutation [0, 2, 1, 3] for a normal (non-batched) GEMM:
/// rows split as D0 * D1, columns split as D2 * D3.
template<int D1, int D2>
struct PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> {
  static bool constexpr kBatched = false;
  /// Row extent must divide by D1; column extent must divide by D2.
  static int constexpr kRowFactor = D1;
  static int constexpr kColumnFactor = D2;
  static int constexpr kBatchFactor = 1;
  using Layout = cutlass::layout::TensorNHWC;
  static std::string name() {
    return "Tensor4DPermute0213<" + std::to_string(D1) + "," + std::to_string(D2) + ">";
  }
  static std::string desc() {
    return "normal GEMM permutation [0, 2, 1, 3]";
  }
  /// View [rows, cols] as a 4-D tensor [D0, D1, D2, D3].
  static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    int D0 = extent.row() / D1;
    int D3 = extent.column() / D2;
    return {D0, D1, D2, D3};
  }
  /// Apply the [0, 2, 1, 3] axis permutation to a 4-D shape.
  static Layout::TensorCoord permute(Layout::TensorCoord const &s) {
    return {s[0], s[2], s[1], s[3]};
  }
};
/// Inverse (input) counterpart of Tensor4DPermute0213RowMajor: D1 and D2 swap
/// roles in the divisibility requirements and the pre-permute shape.
template<int D1, int D2>
struct PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>>
: public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> {
  static bool constexpr kBatched = false;
  static int constexpr kRowFactor = D2;
  static int constexpr kColumnFactor = D1;
  static int constexpr kBatchFactor = 1;
  using Base = PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>>;
  using Layout = typename Base::Layout;
  /// Shape *before* the permutation was applied (inverse mapping).
  static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    int D0 = extent.row() / D2;
    int D3 = extent.column() / D1;
    return {D0, D1, D2, D3};
  }
};
/// Column-major variant of the direct [0, 2, 1, 3] permutation; only the
/// underlying tensor layout differs from the row-major specialization.
template<int D1, int D2>
struct PermuteInfo<cutlass::layout::Tensor4DPermute0213ColumnMajor<D1, D2>>
: public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> {
  using Layout = cutlass::layout::TensorCWHN;
};
/// Column-major variant of the inverse [0, 2, 1, 3] permutation.
template<int D1, int D2>
struct PermuteInfo<cutlass::layout::Tensor4DPermute0213ColumnMajorInverse<D1, D2>>
: public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>> {
  using Layout = cutlass::layout::TensorCWHN;
};
/// Direct (output) 5-D permutation [2, 0, 3, 1, 4]: rows split as T0 * T1,
/// columns split as T2 * T3 * T4.
template<int T1, int T2, int T3>
struct PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>> {
  static bool constexpr kBatched = false;
  /// Row extent must divide by T1; column extent must divide by T2 * T3.
  static int constexpr kRowFactor = T1;
  static int constexpr kColumnFactor = T2 * T3;
  static int constexpr kBatchFactor = 1;
  using Layout = cutlass::layout::TensorNDHWC;
  static std::string name() {
    return "Tensor5DPermute20314<" + std::to_string(T1) + "," + std::to_string(T2) + "," + std::to_string(T3) + ">";
  }
  static std::string desc() {
    return "normal GEMM permutation [2, 0, 3, 1, 4]";
  }
  /// View [rows, cols] as a 5-D tensor [T0, T1, T2, T3, T4].
  static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count)
  {
    int const T0 = extent.row() / T1;
    int const T4 = extent.column() / (T2 * T3);
    return {T0, T1, T2, T3, T4};
  }
  /// Apply the [2, 0, 3, 1, 4] axis permutation to a 5-D shape.
  static Layout::TensorCoord permute(Layout::TensorCoord const &s)
  {
    return {s[2], s[0], s[3], s[1], s[4]};
  }
};
/// Inverse (input) counterpart of Tensor5DPermute20314RowMajor.
template<int T1, int T2, int T3>
struct PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajorInverse<T1, T2, T3>>
: public PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>> {
  static bool constexpr kBatched = false;
  static int constexpr kRowFactor = T2;
  static int constexpr kColumnFactor = T1 * T3;
  static int constexpr kBatchFactor = 1;
  using Base = PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>>;
  using Layout = typename Base::Layout;
  /// Shape *before* the permutation was applied (inverse mapping).
  static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    int const T0 = extent.row() / T2;
    int const T4 = extent.column() / (T1 * T3);
    return {T0, T1, T2, T3, T4};
  }
};
/// Direct (output) 5-D permutation [0, 2, 4, 1, 3] (column-major variant).
template<int T1, int T2, int T3>
struct PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>> {
  static bool constexpr kBatched = false;
  /// Row extent must divide by T1; column extent must divide by T2 * T3.
  static int constexpr kRowFactor = T1;
  static int constexpr kColumnFactor = T2 * T3;
  static int constexpr kBatchFactor = 1;
  using Layout = cutlass::layout::TensorCWHDN;
  static std::string name() {
    return "Tensor5DPermute02413<" + std::to_string(T1) + "," + std::to_string(T2) + "," + std::to_string(T3) + ">";
  }
  static std::string desc() {
    return "normal GEMM permutation [0, 2, 4, 1, 3]";
  }
  // NOTE(review): this alias appears unused within this struct and has no
  // counterpart in the other specializations; kept for source compatibility.
  using Coord = cutlass::Tensor5DCoord;
  /// View [rows, cols] as a 5-D tensor [T0, T1, T2, T3, T4].
  static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count)
  {
    int const T0 = extent.row() / T1;
    int const T4 = extent.column() / (T2 * T3);
    return {T0, T1, T2, T3, T4};
  }
  /// Apply the [0, 2, 4, 1, 3] axis permutation to a 5-D shape.
  static Layout::TensorCoord permute(Layout::TensorCoord const &s)
  {
    return {s[0], s[2], s[4], s[1], s[3]};
  }
};
/// Inverse (input) counterpart of Tensor5DPermute02413ColumnMajor.
template<int T1, int T2, int T3>
struct PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajorInverse<T1, T2, T3>>
: public PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>> {
  static bool constexpr kBatched = false;
  static int constexpr kRowFactor = T2;
  static int constexpr kColumnFactor = T1 * T3;
  static int constexpr kBatchFactor = 1;
  using Base = PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>>;
  using Layout = typename Base::Layout;
  /// Shape *before* the permutation was applied (inverse mapping).
  static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
    int const T0 = extent.row() / T2;
    int const T4 = extent.column() / (T1 * T3);
    return {T0, T1, T2, T3, T4};
  }
};
| cutlass/examples/39_gemm_permute/permute_info.h/0 | {
"file_path": "cutlass/examples/39_gemm_permute/permute_info.h",
"repo_id": "cutlass",
"token_count": 4266
} | 10 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import argparse
import torch
import sys
import os
from piped_subprocess import PipedSubprocess, TORCH_DTYPE_NAME
import math
# Command line: the only argument is the path to the compiled CUTLASS example binary.
parser = argparse.ArgumentParser()
parser.add_argument("example_exe", type=str, help="Path to the 41_fused_multi_head_attention_backward executable")
args = parser.parse_args()
# Fixed seed so tensor contents (and hence the comparison) are reproducible.
torch.manual_seed(0)
dtype = torch.float16
# Problem size: batch, query length, key length, heads, head dim (QK), head dim (V).
B, Mq, Mkv, H, K, Kv = 2, 1024, 1024, 5, 128, 128
causal = True
repeat_count = 100
# Comparison tolerances chosen per dtype (looser for lower precision).
ATOL = {
    torch.float: 5e-4,
    torch.half: 9.5e-2,
    torch.bfloat16: 7e-1,
}[dtype]
RTOL = {
    torch.float: 1e-4,
    torch.half: 2e-2,
    torch.bfloat16: 1e-1,
}[dtype]
assert not (causal and Mq < Mkv), "causal only supports seqlenK <= seqlenQ"
fmha_bw_binary = args.example_exe
if not os.path.isfile(fmha_bw_binary):
    print(f"""No such file: `{fmha_bw_binary}`\nDid you forget to run "make 41_fused_multi_head_attention"?""")
    sys.exit(1)
def create_lower_triangular_mask(num_queries=None, num_keys=None, mask_dtype=None):
    """Build an additive causal attention mask of shape [1, num_queries, num_keys].

    Entries strictly above the main diagonal are -inf (masking those key
    positions when the mask is added to attention logits); the lower triangle,
    including the diagonal, is 0.

    Args:
        num_queries: number of query positions; defaults to the module-level Mq.
        num_keys: number of key positions; defaults to the module-level Mkv.
        mask_dtype: torch dtype of the mask; defaults to the module-level dtype.

    Returns:
        A torch.Tensor of shape [1, num_queries, num_keys].
    """
    # Fall back to the module-level problem configuration so existing
    # zero-argument call sites keep their behavior.
    if num_queries is None:
        num_queries = Mq
    if num_keys is None:
        num_keys = Mkv
    if mask_dtype is None:
        mask_dtype = dtype
    return torch.triu(torch.full(  # type: ignore
        [1, num_queries, num_keys],
        dtype=mask_dtype,
        fill_value=float("-inf"),
    ), diagonal=1)
def ref_mha_bmk(q, k, v, mask):
    """Reference multi-head attention with inputs/outputs in BMK layout.

    Computes softmax(q @ k^T / sqrt(d) + mask) @ v in float32 and also returns
    the per-row logsumexp of the (masked, scaled) logits.

    Returns:
        (out, lse): out has shape [B, Mq, Kv]; lse has shape [B, Mq].
    """
    qf = q.float()
    kf = k.float()
    vf = v.float()
    # Scale queries by 1/sqrt(head_dim) before forming the logits.
    qf = qf * (1 / qf.shape[-1] ** 0.5)
    logits = qf @ kf.transpose(-2, -1)
    if mask is not None:
        logits += mask
    # Numerically stable logsumexp: subtract the row max before exponentiating.
    row_max = logits.max(-1, True).values
    row_sum = (logits - row_max).exp().sum(-1, True)
    probs = logits.softmax(-1)
    lse = (row_max + row_sum.log()).squeeze(2)
    return probs @ vf, lse
def bmhk2bmk(t):
    """Collapse a BMHK tensor [batch, seq, heads, dim] into BMK [(batch*heads), seq, dim]."""
    batch, seq_len, heads, dim = t.shape
    return t.permute((0, 2, 1, 3)).reshape([batch * heads, seq_len, dim])
def ref_mha_bmhk(q, k, v, mask):
    """Reference multi-head attention with inputs/outputs in BMHK layout.

    Flattens the head dimension into the batch, runs the BMK reference, then
    restores the BMHK layout of the output.

    Returns:
        (out, lse): out has shape [B, Mq, H, Kv]; lse has shape [B, H, Mq].
    """
    assert q.ndim == 4
    batch = q.shape[0]
    seq_q = q.shape[1]
    heads = q.shape[2]
    flat_out, flat_lse = ref_mha_bmk(bmhk2bmk(q), bmhk2bmk(k), bmhk2bmk(v), mask=mask)
    out = flat_out.reshape([batch, heads, seq_q, v.shape[3]]).permute((0, 2, 1, 3))
    return out, flat_lse.reshape([batch, heads, seq_q])
def ref_mha_bw_bmhk(q, k, v, mask, lse, out, grad_out, delta):
    """Reference attention backward pass for BMHK tensors.

    Recomputes the (transposed) attention probabilities from the saved
    logsumexp and produces gradients w.r.t. q, k and v.

    Note: relies on module-level B (batch) and H (heads) to restore the BMHK
    layout of the returned gradients.

    Returns:
        [grad_q, grad_k, grad_v], each of shape [B, M, H, K*].
    """
    lse = lse[:, :, :q.shape[1]] #BMH, unpad Q dimension
    # delta[b, m] = sum_k grad_out * out; reshape for broadcasting below.
    delta = delta.reshape([-1, delta.shape[-1], 1])
    # bmhk -> bmk
    q, k, v, out, grad_out = [bmhk2bmk(x).float() for x in (q, k, v, out, grad_out)]
    # Recompute attention probabilities (transposed) via exp(logits - lse).
    attn_T = k @ q.transpose(-2, -1)
    if mask is not None:
        attn_T += mask.transpose(-2, -1)
    attn_T = attn_T * (1 / q.shape[-1] ** 0.5)
    attn_T = attn_T - lse.reshape([-1, 1, lse.shape[-1]])
    attn_T = attn_T.exp()
    grad_v = attn_T @ grad_out
    dov = grad_out @ v.transpose(-2, -1)
    # Softmax backward: (dO @ V^T - delta) * P, then apply the 1/sqrt(d) scale.
    tmp = (dov - delta) * attn_T.transpose(-2, -1)
    tmp = tmp / (q.shape[-1] ** 0.5)
    grad_q = tmp @ k
    grad_k = tmp.transpose(-2, -1) @ q
    return [x.reshape([B, H, x.shape[1], x.shape[-1]]).permute([0, 2, 1, 3]) for x in [grad_q, grad_k, grad_v]]
print("initializing tensors...")
query = torch.randn([B, Mq, H, K], dtype=dtype)
key = 3 * torch.randn([B, Mkv, H, K], dtype=dtype)
value = 3 * torch.randn([B, Mkv, H, Kv], dtype=dtype)
mask = create_lower_triangular_mask() if causal else None
# let PyTorch compute gradients
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
print("computing fw...")
out, lse = ref_mha_bmhk(query, key, value, mask=mask)
out = out.to(dtype).contiguous()
grad_out = 3 * torch.randn([B, Mq, H, Kv], dtype=dtype)
print("computing bw with autograd...")
out.backward(grad_out)
scale = (1 / query.shape[-1] ** 0.5)
# Additional data needed by the kernel
delta = (grad_out.float() * out.float()).sum(-1).transpose(-2, -1).contiguous()
# Pad logsumexp to a multiple of 32 along the query dimension, as the kernel expects.
pad_amount = (32 - (lse.shape[2] % 32)) % 32
lse = torch.nn.functional.pad(lse, [0, pad_amount], value=math.inf)
print("computing bw with reference implem...")
gQr, gKr, gVr = ref_mha_bw_bmhk(query, key, value, mask, lse, out, grad_out, delta)
with PipedSubprocess(fmha_bw_binary) as bw_kernel:
    # Send kernel arguments
    bw_kernel.write(
        TORCH_DTYPE_NAME[query.dtype],
        "scale", scale,
        "head_dim", K,
        "head_dim_value", Kv,
        "num_queries", Mq,
        "num_keys", Mkv,
        "num_heads", H,
        "custom_mask_type", (1 if causal else 0),
        "num_batches", B,
        "repeat_count", repeat_count,
        "num_splits_key", (Mkv // 128),
    )
    bw_kernel.writeTensor(query, "query", ["q_strideB", "q_strideM", "q_strideH"])
    bw_kernel.writeTensor(key, "key", ["k_strideB", "k_strideM", "k_strideH"])
    bw_kernel.writeTensor(value, "value", ["v_strideB", "v_strideM", "v_strideH"])
    bw_kernel.writeTensor(lse, "logsumexp", ["lse_strideB", "lse_strideH"])
    bw_kernel.writeTensor(out, "output", ["o_strideB", "o_strideM", "o_strideH"])
    bw_kernel.writeTensor(grad_out, "grad_output", ["gO_strideB", "gO_strideM", "gO_strideH"])
    bw_kernel.writeTensor(delta, "delta", ["delta_strideB", "delta_strideH"])
    # NOTE(review): exits with status 0 even on handshake failure — confirm
    # whether a non-zero exit code was intended here.
    if bw_kernel.read() != "OK":
        print("Got unexpected output")
        print(bw_kernel.subp.communicate()[0])
        sys.exit(0)
    # Read kernel output
    gQ = bw_kernel.readTensor("grad_query", ["gQ_strideB", "gQ_strideM", "gQ_strideH"], query.shape).float()
    gK = bw_kernel.readTensor("grad_key", ["gK_strideB", "gK_strideM", "gK_strideH"], key.shape).float()
    gV = bw_kernel.readTensor("grad_value", ["gV_strideB", "gV_strideM", "gV_strideH"], value.shape).float()
    runtime_ms = float(bw_kernel.readNamed("runtime_ms"))
    # FLOP count of the backward pass (5 matmuls), halved under a causal mask.
    float_ops = B * H * sum([
        # att = Q @ K.transpose
        Mq * Mkv * K * 2,
        # att @ dO
        Mkv * Mq * Kv * 2,
        # dov = dO @ V
        Mq * Kv * Mkv * 2,
        # dov @ K
        Mq * K * Mkv * 2,
        # dov @ Q
        Mq * K * Mkv * 2,
    ])
    if causal:
        float_ops //= 2
    print(f"""
Fused multi-head attention - backward
    batch_size={B}
    num_queries={Mq}
    num_keys={Mkv}
    num_heads={H}
    head_dim={K}
    head_dim_value={Kv}
    Correctness:
        grad_query: {"PASS" if torch.allclose(gQ, gQr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gQ - gQr).abs().max()})
        grad_key: {"PASS" if torch.allclose(gK, gKr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gK - gKr).abs().max()})
        grad_value: {"PASS" if torch.allclose(gV, gVr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gV - gVr).abs().max()})
        (atol={ATOL} / rtol={RTOL})
    Runtime: {runtime_ms}ms ({(float_ops / (1024 ** 4)) / (runtime_ms / 1000):.4f} TFlops)
""")
    # Sanity check: the Python reference must itself agree with autograd.
    assert torch.allclose(query.grad.float(), gQr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
    assert torch.allclose(key.grad.float(), gKr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
    assert torch.allclose(value.grad.float(), gVr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
| cutlass/examples/41_fused_multi_head_attention/fmha_backward_test.py/0 | {
"file_path": "cutlass/examples/41_fused_multi_head_attention/fmha_backward_test.py",
"repo_id": "cutlass",
"token_count": 3659
} | 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/pitch_linear.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for TensorOp accumulator layouts
template <
  typename ThreadblockShape_,
  typename WarpShape_,
  int PartitionsK,
  typename Element_,
  int ElementsPerAccess
>
struct DefaultThreadMapTensorOpForFusedBias {

  /// Threadblock-level GEMM tile shape (concept: GemmShape).
  using ThreadblockShape = ThreadblockShape_;

  /// Warp-level GEMM tile shape (concept: GemmShape).
  using WarpShape = WarpShape_;

  /// Number of threadblock tile partitions along the K dimension.
  static int const kPartitionsK = PartitionsK;

  /// Output element type.
  using Element = Element_;

  /// Vector width of each output access.
  static int const kElementsPerAccess = ElementsPerAccess;

  //
  // Definitions
  //

  struct Detail {

    /// Tensor Operations fundamentally perform operations on 8 rows
    static int const kTensorOpRows = 8;

    /// Number of threads per warp.
    static int const kWarpSize = 32;

    // Bug fix: the second clause previously re-checked the M dimension
    // (copy-paste). The threadblock N extent must also be divisible by the
    // warp tile's N extent for WarpCount below to be exact.
    static_assert(
        !(ThreadblockShape::kM % WarpShape::kM) &&
        !(ThreadblockShape::kN % WarpShape::kN), "Divisibility");

    /// Number of warps spanning the threadblock tile.
    using WarpCount = gemm::GemmShape<
      ThreadblockShape::kM / WarpShape::kM,
      ThreadblockShape::kN / WarpShape::kN,
      kPartitionsK
    >;

    /// Number of participating threads
    static int const kThreads = WarpCount::kCount * kWarpSize;
  };

  //
  // ThreadMap
  //

  /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap
  using Type = OutputTileOptimalThreadMapBiasAct <
    OutputTileShape<ThreadblockShape::kN, Detail::kTensorOpRows, Detail::WarpCount::kM, 1, 1>,
    OutputTileShape<1, WarpShape::kM / Detail::kTensorOpRows, 1, 1, WarpShape::kM / Detail::kTensorOpRows>,
    Detail::kThreads,
    kElementsPerAccess,
    sizeof_bits<Element>::value
  >;
};
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/default_thread_map_tensor_op_for_fused_bias.h/0 | {
"file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/default_thread_map_tensor_op_for_fused_bias.h",
"repo_id": "cutlass",
"token_count": 1126
} | 12 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
def type_2_cutlass_type(input_type = "fp16"):
    """Map a short dtype/layout tag to the corresponding CUTLASS C++ type name.

    Handles floating-point tags (fp32/bf16/fp16), integer tags (int32/int8),
    and layout tags (Row/Col). Returns None for an unrecognized tag, matching
    the original fall-through behavior.
    """
    cutlass_names = {
        # floating-point types
        "fp32": "float",
        "bf16": "cutlass::bfloat16_t",
        "fp16": "cutlass::half_t",
        # integer types
        "int32": "int32_t",
        "int8": "int8_t",
        # matrix layouts
        "Row": "cutlass::layout::RowMajor",
        "Col": "cutlass::layout::ColumnMajor",
    }
    return cutlass_names.get(input_type)
def cvt_2_cutlass_shape(gemm_shape):
    """Render a 3-element [M, N, K] sequence as "cutlass::gemm::GemmShape<M, N, K>".

    Returns None (implicitly, like the original) when the input does not have
    exactly three elements.
    """
    if len(gemm_shape) != 3:
        return None
    dims = ", ".join(str(extent) for extent in gemm_shape)
    return "cutlass::gemm::GemmShape<" + dims + ">"
def write_2_headfile(filename, file_dir, string):
    """Write an auto-generated C++ header (banner + #pragma once + body).

    Note: the destination path is formed by plain concatenation, so file_dir
    is expected to end with a path separator (as callers pass it).
    """
    banner = "/* Auto Generated code - Do not edit.*/\n\n\n#pragma once\n"
    with open(file_dir + filename, 'w') as header_file:
        header_file.write(banner + string)
def var_idx(varaiable, index):
    """Append an index suffix to a base name, e.g. ("ptr", 2) -> "ptr2"."""
    return f"{varaiable}{index}"
def list_2_string(input_list, ):
    """Join elements with ", \\n" separators, terminating with a single newline.

    An empty list yields an empty string (matching the original loop).
    """
    if not input_list:
        return ""
    return ", \n".join(str(item) for item in input_list) + "\n"
def get_epilogue_info(layer_info):
return layer_info['epilogue']
def get_epilogue_tp(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['tp']
def get_epilogue_add_bias_or_not(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['bias']['addbias']
def get_epilogue_add_bias_tp(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['bias']['bias_tp']
def get_epilogue_args(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['args']
def get_epilogue_bias_shape(layer_info):
bias_tp = get_epilogue_add_bias_tp(layer_info).lower()
mn_shape = layer_info['mnk'][:-1]
if bias_tp == 'mat':
mn_shape[0] = 'M'
return mn_shape
elif bias_tp == 'vec':
mn_shape[0] = 1
return mn_shape
else:
assert(0)
def get_epilogue_bias_ldm(layer_info):
bias_tp = get_epilogue_add_bias_tp(layer_info).lower()
mn_shape = layer_info['mnk'][:-1]
c_layout = layer_info['C_format'].lower()
if c_layout != 'row':
assert(0)
if bias_tp == 'mat':
return mn_shape[1]
elif bias_tp == 'vec':
return 0
else:
assert(0)
def get_epilogue_compute_tp(layer_info):
    """Return the accumulator/compute data type tag of the layer ('Acc_tp')."""
    acc_tp = layer_info['Acc_tp']
    return acc_tp
| cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/helper.py/0 | {
"file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/helper.py",
"repo_id": "cutlass",
"token_count": 1814
} | 13 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run depthwise 2d convolution kernels using functions and data structures
provided by CUTLASS using SIMT instruction;
    There are 3 types of implementations of depthwise 2d convolution
    1. kAnalytic
      Implicit gemm 2d convolution algorithm.
2. kOptimized
An optimized algorithm and supports arbitrary stride and dilation.
3. kFixedStrideDilation
An optimized algorithm with fixed stride and dilation to reduce the runtime computation and do
more optimizations.
In general, the perf of kFixedStrideDilation would be better than kOptimized. However, if the filter
size, stride or dilation is large, it would encounter register spilling and may hurt the perf. If
in this case, please use kOptimized.
For kOptimized and kFixedStrideDilation, in order to fully utilize GPU hardware resources and achieve
better perf, when the output tensor size is large, splitk should be enabled to achieve better perf.
In this example, it demonstrates how to construct and run a FixedStrideDilation depthwise 2d
convolution kernel.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_depthwise_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/conv/device/direct_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = cutlass::half_t;     // Data type of accumulator
using ElementComputeEpilogue = cutlass::half_t; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t;          // Data type of elements in input tensor
using ElementInputB = cutlass::half_t;          // Data type of elements in input tensor
using ElementOutput = cutlass::half_t;          // Data type of elements in output tensor
// All tensors use the NHWC layout.
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassSimt;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm60;
// This code section describes the groups a thread block will compute
constexpr int groups_per_cta = 64;
// This code section describes the output tile <N, O, P, Q> a thread block will compute
using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>;
// This code section describes the filter shape <R, S>
using FilterShape = cutlass::MatrixShape<3, 3>;
// Threadblock tile shape
using ThreadblockShape =
    cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>;
// This code section describes the tile size a warp will compute
// WarpShape::kM = P * Q the warps would process
// WarpShape::kN = groups_per_cta that the warps would process
// WarpShape::kK = filter_size that the warps would process
using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>;
// This code section describes the size of MMA op (1x1x1 scalar FMA for SIMT)
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
    cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle<
        1,
        ThreadBlockOutputShape::kN,
        ThreadBlockOutputShape::kH,
        ThreadBlockOutputShape::kW>;
// Number of pipeline stages to use
constexpr int NumStages = 4;
// This code section describes that the selected iterator algorithm is kFixedStrideDilation
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm =
    cutlass::conv::IteratorAlgorithm::kFixedStrideDilation;
// Compile-time stride/dilation required by kFixedStrideDilation; runtime values must match.
using StrideShape = cutlass::MatrixShape<1, 1>;
using DilationShape = cutlass::MatrixShape<1, 1>;
// Vector width of the epilogue: one 128-bit access worth of output elements.
constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
    ElementOutput,               // Data type of output matrix.
    kEpilogueElementsPerAccess,  // The number of elements per vectorized.
                                 // memory access. This becomes the vector width of
                                 // math instructions in the epilogue too.
    ElementAccumulator,          // Data type of accumulator
    ElementComputeEpilogue,      // Data type for alpha/beta in linear combination
    cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling>;  // Epilogue scaling operation.
// Assemble the depthwise direct-convolution kernel from the pieces above.
using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop<
    ElementInputA,
    LayoutInputA,
    ElementInputB,
    LayoutInputB,
    ElementOutput,
    LayoutOutput,
    ElementAccumulator,
    MMAOp,
    SmArch,
    ThreadblockShape,
    ThreadBlockOutputShape,
    FilterShape,
    WarpShape,
    InstructionShape,
    EpilogueOp,
    SwizzleThreadBlock,
    NumStages,
    cutlass::arch::OpMultiplyAdd,
    IteratorAlgorithm,
    cutlass::conv::StrideSupport::kFixed,
    StrideShape,
    DilationShape>::Kernel;
// Device-level adapter that launches the kernel.
using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {

  bool help;                          // print usage and exit
  cutlass::Tensor4DCoord input_size;  // input activation extent (N, H, W, C)
  cutlass::Tensor4DCoord filter_size; // filter extent (K, R, S, C-per-group)
  cutlass::Tensor4DCoord padding;     // padding stored as (pad_h, pad_h, pad_w, pad_w)
  cutlass::MatrixCoord conv_stride;   // convolution stride (stride_h, stride_w)
  cutlass::MatrixCoord dilation;      // convolution dilation (dilation_h, dilation_w)
  int groups;                         // number of groups (== C for depthwise)
  int splitk;                         // number of split-K slices
  bool reference_check;               // verify result against a host reference
  bool measure_performance;           // time the kernel
  int iterations;                     // number of profiling iterations
  bool save_workspace;                // dump tensors to a text file
  ElementComputeEpilogue alpha;       // epilogue scalar alpha
  ElementComputeEpilogue beta;        // epilogue scalar beta
  std::string tag;                    // optional tag replicated in the results table

  // Initializers are listed in member declaration order: members are always
  // initialized in declaration order regardless of the list, so keeping the
  // two in sync avoids the -Wreorder warning and matches actual execution.
  Options()
      : help(false),
        input_size(1, 128, 128, 32),
        filter_size(32, 3, 3, 1),
        padding(1, 1, 1, 1),
        conv_stride(1, 1),
        dilation(1, 1),
        groups(32),
        splitk(1),
        reference_check(false),
        measure_performance(true),
        iterations(20),
        save_workspace(false),
        alpha(1),
        beta(0) {}

  // Verify the problem size is compatible with the CUTLASS Convolution implementation.
  bool valid() {
    //
    // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
    // all pointers, strides, and tensor extents must be divisible by 8 elements.
    //
    int const kAlignment = 8;
    if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) {
      // misaligned tensors
      return false;
    }
    // Depthwise conv requires one filter per input channel: G == C == K.
    if (groups != input_size.c()) {
      return false;
    }
    if (filter_size.n() != groups) {
      return false;
    }
    // Invalid padding: only "same"-style padding of filter/2 is supported.
    if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) {
      return false;
    }
    // Filter size passed through command line does not match filter size template parameter
    if (filter_size.h() != FilterShape::kRow || filter_size.w() != FilterShape::kColumn) {
      std::cerr << "Filter size passed in (" << filter_size.h() << "x" << filter_size.w() << ") "
                << "must match the FilterShape template parameter of the convolution "
                << "(" << FilterShape::kRow << "x" << FilterShape::kColumn << "). "
                << "To use the filter shape passed in, change the FilterShape template "
                << "parameter and recompile this example."
                << std::endl;
      return false;
    }
    return true;
  }

  /// Updates input and filter sizes, recomputing the "same" padding from the filter extents.
  void update(cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) {
    this->input_size = input_size;
    this->filter_size = filter_size;
    padding.n() = filter_size.h() / 2;
    padding.h() = filter_size.h() / 2;
    padding.w() = filter_size.w() / 2;
    padding.c() = filter_size.w() / 2;
  }

  // Parses the command line
  void parse(int argc, char const **args) {
    cutlass::CommandLine cmd(argc, args);
    if (cmd.check_cmd_line_flag("help")) {
      help = true;
    }
    if (cmd.check_cmd_line_flag("ref-check")) {
      reference_check = true;
    }
    if (cmd.check_cmd_line_flag("perf-check")) {
      measure_performance = true;
    }
    if (cmd.check_cmd_line_flag("save-workspace")) {
      save_workspace = true;
    }
    cmd.get_cmd_line_argument("n", input_size.n());
    cmd.get_cmd_line_argument("h", input_size.h());
    cmd.get_cmd_line_argument("w", input_size.w());
    cmd.get_cmd_line_argument("c", input_size.c());
    cmd.get_cmd_line_argument("k", filter_size.n());
    cmd.get_cmd_line_argument("r", filter_size.h());
    cmd.get_cmd_line_argument("s", filter_size.w());
    cmd.get_cmd_line_argument("g", groups);
    // Depthwise: each filter has a single channel and K is forced to C,
    // so any --k passed on the command line is intentionally overridden here.
    filter_size.c() = 1;
    filter_size.n() = input_size.c();
    cmd.get_cmd_line_argument("alpha", alpha);
    cmd.get_cmd_line_argument("beta", beta);
    cmd.get_cmd_line_argument("splitk", splitk);
    cmd.get_cmd_line_argument("iterations", iterations);
    cmd.get_cmd_line_argument("tag", tag);
    int32_t padding_h = filter_size.h() / 2;
    int32_t padding_w = filter_size.w() / 2;
    padding = {padding_h, padding_h, padding_w, padding_w};
  }

  /// Prints the usage statement.
  std::ostream &print_usage(std::ostream &out) const {
    // NOTE: text corrected from "Ampere's Tensor Core operators" — this
    // example uses SIMT math (OpClassSimt on SM60+), not Tensor Cores.
    out << "46_depthwise_gemm_fprop example\n\n"
        << "  This example uses SIMT operators on F16 data types to compute\n"
        << "  forward convolution on tensors of layout NHWC.\n\n"
        << "Options:\n\n"
        << "  --help               If specified, displays this usage statement.\n\n"
        << "  --n=<int>            Input tensor extent N\n"
        << "  --h=<int>            Input tensor extent H\n"
        << "  --w=<int>            Input tensor extent W\n"
        << "  --c=<int>            Input tensor extent C\n"
        << "  --k=<int>            Filter extent K\n"
        << "  --r=<int>            Filter extent R\n"
        << "  --s=<int>            Filter extent S\n\n"
        << "  --g=<int>            Groups\n\n"
        << "  --alpha=<float>      Epilogue scalar alpha\n"
        << "  --beta=<float>       Epilogue scalar beta\n\n"
        << "  --splitk=<int>       Enable splitK\n\n"
        << "  --ref-check          If set (true), reference check on the host is computed\n"
        << "  --perf-check         If set (true), performance is measured.\n"
        << "  --iterations=<int>   Number of profiling iterations to perform.\n"
        << "  --save-workspace     If set, workspace is written to a text file.\n"
        << "  --tag=<string>       String to replicate across the first column in the results "
           "table\n";
    out << "\n\nExamples:\n\n"
        << "$ ./examples/46_depthwise_simt_conv2dfprop/46_depthwise_simt_conv2dfprop  --n=32 "
           "--h=224 --w=224 --c=128 --k=128 --g=128 --r=3 --s=3\n\n"
        << "$ ./examples/46_depthwise_simt_conv2dfprop/46_depthwise_simt_conv2dfprop  --n=1 "
           "--h=224 --w=224 --c=32 --k=32 --g=32 --r=3 --s=3 --splitk=10 --ref-check\n\n";
    return out;
  }

  /// Computes the output tensor size (NPQK) from the padded input and filter extents.
  cutlass::Tensor4DCoord output_size() const {
    // padding is stored as (pad_h, pad_h, pad_w, pad_w), hence n()+h() and w()+c().
    return cutlass::Tensor4DCoord(
        input_size.n(),
        (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
        (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
        filter_size.n());
  }

  /// Compute performance in GFLOP/s
  double gflops(double runtime_s) const {
    // Number of multiply-adds = NPQK * CRS (C-per-group is 1 for depthwise)
    int64_t fmas =
        output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
    // Two flops per multiply-add
    return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Aggregates the outcome of one profiled convolution run (status, timing, throughput).
struct Result {
  double runtime_ms;                // average kernel runtime in milliseconds
  double gflops;                    // achieved throughput derived from runtime_ms
  cutlass::Status status;           // status of the CUTLASS kernel launch
  cutlass::Status reference_check;  // kInvalid when the host reference check was skipped
  cudaError_t error;                // last CUDA runtime error encountered
  Result()
      : runtime_ms(0),
        gflops(0),
        status(cutlass::Status::kSuccess),
        reference_check(cutlass::Status::kInvalid),
        error(cudaSuccess) {}
  /// Prints the CSV header row; prepends a "Name" column only when a tag was given.
  static std::ostream &print_header(std::ostream &out, Options const &options) {
    if (!options.tag.empty()) {
      out << "Name,";
    }
    out << "Layer,N,H,W,C,K,R,S,G,stride_h,stride_w,dilation_h,dilation_w,splitK,Runtime,GFLOPs";
    return out;
  }
  /// Prints one CSV data row matching print_header's column order.
  std::ostream &print(std::ostream &out, int idx, Options const &options) {
    if (!options.tag.empty()) {
      out << options.tag << ",";
    }
    cutlass::Tensor4DCoord output_size = options.output_size();
    out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << ","
        << options.input_size.w() << "," << options.input_size.c() << ","
        << options.filter_size.n() << "," << options.filter_size.h() << ","
        << options.filter_size.w() << ","
        << options.groups << "," << options.conv_stride.row() << "," << options.conv_stride.column()
        << ","
        << options.dilation.row() << "," << options.dilation.column() << ","
        << options.splitk << ","
        << runtime_ms << "," << gflops;
    return out;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one testcase: allocates and fills the tensors, launches the depthwise
/// convolution once, then optionally verifies against a host reference, dumps
/// the workspace, and measures performance over `options.iterations` runs.
Result profile_convolution(Options const &options) {
  Result result;
  //
  // Allocate host-device tensors using the CUTLASS Utilities.
  //
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
  // Scratch tensor handed to DirectConvolution for a reordered copy of the
  // filter (filled on the device side — not initialized here).
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b_transpose(options.filter_size);
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
  //
  // Initialize tensors
  //
  // Fill tensor A on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_a.host_view(), 1, ElementInputA(5), ElementInputA(-6), 0);
  // Fill tensor B on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b.host_view(), 1, ElementInputB(3), ElementInputB(-6), 0);
  // Fill tensor C on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_c.host_view(), 1, ElementOutput(5), ElementOutput(-6), 0);
  // Fill tensor D on host with zeros
  cutlass::reference::host::TensorFill(tensor_d.host_view());
  // Fill tensor D for reference on host with zeros
  cutlass::reference::host::TensorFill(tensor_ref_d.host_view());
  // Copy data from host to GPU
  tensor_a.sync_device();
  tensor_b.sync_device();
  tensor_b_transpose.sync_device();
  tensor_c.sync_device();
  tensor_d.sync_device();
  tensor_ref_d.sync_device();
  //
  // Define arguments for CUTLASS Convolution
  //
  cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
  // Split P*Q into multiple CTA
  int split_k_slices = options.splitk;
  // Construct Conv2dProblemSize with user defined output size
  cutlass::conv::Conv2dProblemSize problem_size(options.input_size,
                                                options.filter_size,
                                                options.padding,
                                                options.conv_stride,
                                                options.dilation,
                                                options.output_size(),
                                                mode,
                                                split_k_slices,
                                                options.groups);
  // Construct Direct2dConv::Arguments structure with conv2d
  // problem size, data pointers, and epilogue values
  typename Direct2dConv::Arguments arguments{problem_size,
                                             tensor_a.device_ref(),
                                             tensor_b.device_ref(),
                                             tensor_c.device_ref(),
                                             tensor_d.device_ref(),
                                             {options.alpha, options.beta},
                                             tensor_b_transpose.device_ref()};
  //
  // Initialize CUTLASS Convolution
  //
  Direct2dConv implicit_gemm_op;
  size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
  // Allocate workspace memory
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
  result.status = implicit_gemm_op.can_implement(arguments);
  CUTLASS_CHECK(result.status);
  result.status = implicit_gemm_op.initialize(arguments, workspace.get());
  CUTLASS_CHECK(result.status);
  //
  // Launch initialized CUTLASS kernel
  //
  result.status = implicit_gemm_op();
  CUTLASS_CHECK(result.status);
  //
  // Optional reference check
  //
  if (options.reference_check) {
    std::cout << "Verification on host...\n";
    // Compute with reference implementation
    cutlass::reference::host::Conv2dFprop<
        ElementInputA,
        LayoutInputA,
        ElementInputB,
        LayoutInputB,
        ElementOutput,
        LayoutOutput,
        ElementComputeEpilogue,
        ElementAccumulator >(problem_size,
                             tensor_a.host_ref(),
                             tensor_b.host_ref(),
                             tensor_c.host_ref(),
                             tensor_ref_d.host_ref(),
                             options.alpha,
                             options.beta);
    // Check if output from CUTLASS kernel and reference kernel are equal or not
    tensor_d.sync_host();
    bool passed =
        cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view());
    if (!passed) {
      result.reference_check = cutlass::Status::kErrorInternal;
      std::cout << "ERROR - results miscompared.\n";
    } else {
      result.reference_check = cutlass::Status::kSuccess;
      std::cout << "Passed.\n";
    }
  } else {
    result.reference_check = cutlass::Status::kInvalid;
  }
  // Optionally dump input/filter/output tensors to a text file for inspection.
  if (options.save_workspace) {
    std::stringstream ss;
    ss << "46_depthwise_simt_conv2dfprop" << options.input_size.n() << "x" << options.input_size.h()
       << "x" << options.input_size.w() << "x" << options.input_size.c() << "_"
       << options.filter_size.n() << "x" << options.filter_size.h() << "x"
       << options.filter_size.w() << "x" << options.filter_size.c() << ".dat";
    std::ofstream output_workspace(ss.str());
    output_workspace << "Input = \n"
                     << tensor_a.host_view() << "\n\n"
                     << "Filters = \n"
                     << tensor_b.host_view() << "\n\n";
    if (options.reference_check) {
      output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
    }
    output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
    std::cout << "Results written to '" << ss.str() << "'." << std::endl;
  }
  //
  // Performance measurement
  //
  if (options.measure_performance) {
    cudaEvent_t events[2];
    for (auto &event : events) {
      result.error = cudaEventCreate(&event);
      if (result.error != cudaSuccess) {
        std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
      }
    }
    // Record an event at the start of a series of convolution operations.
    result.error = cudaEventRecord(events[0]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
    }
    // Launch a sequence of implicit GEMM operations on the device
    for (int iteration = 0; iteration < options.iterations; ++iteration) {
      result.status = implicit_gemm_op();
      CUTLASS_CHECK(result.status);
    }
    // Record an event when the convolutions have been launched.
    result.error = cudaEventRecord(events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
    }
    // Wait for work on the device to complete.
    result.error = cudaEventSynchronize(events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error)
                << std::endl;
      return result;
    }
    // Measure elapsed runtime
    float runtime_ms = 0;
    result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
    }
    // Print average runtime and GFLOPs.
    result.runtime_ms = double(runtime_ms) / double(options.iterations);
    result.gflops = options.gflops(result.runtime_ms / 1000.0);
    // Cleanup
    for (auto event : events) {
      (void)cudaEventDestroy(event);
    }
  }
  return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point: checks device capability, parses options, and runs one problem size.
int main(int argc, char const **args) {
  bool notSupported = false;
  cudaDeviceProp props;
  CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
  // This SIMT example targets SM60 (compute capability 6.0) or newer.
  if (!(props.major >= 6)) {
    std::cerr << "Run on a machine with compute capability at least 60." << std::endl;
    notSupported = true;
  }
  if (notSupported) {
    // Returning zero so this test passes on older architectures.
    return 0;
  }
  Options options;
  options.parse(argc, args);
  if (options.help) {
    options.print_usage(std::cout) << std::endl;
    return 0;
  }
  // Execute one problem size
  if (!options.valid()) {
    std::cerr << "Invalid problem." << std::endl;
    return -1;
  }
  Result result = profile_convolution(options);
  Result::print_header(std::cout, options) << std::endl;
  result.print(std::cout, 1, options) << std::endl;
  return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu/0 | {
"file_path": "cutlass/examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu",
"repo_id": "cutlass",
"token_count": 9564
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/numeric/math.hpp"
namespace example
{

// Naive grid-stride loop implementation of gather.
// The flat output index k is split as k = j * stride + i (via stride_divmod);
// the minor index i is kept and the major index j is remapped through `func`,
// so output row k/stride is read from input row func(k/stride).
// blockIdx.z selects the batch; each batch is num_elems_* elements apart.
template<typename Element, typename Func>
__global__ void
gather_kernel(Element const * __restrict__ input,
              Element * __restrict__ output,
              Func func,
              int num_elems_input,
              int num_elems_output,
              cutlass::FastDivmod stride_divmod)
{
  Element const * input_b = input + blockIdx.z * num_elems_input;
  Element * output_b = output + blockIdx.z * num_elems_output;
  int tidx = threadIdx.x + blockIdx.x * blockDim.x;
  for (int k = tidx; k < num_elems_output; k += blockDim.x * gridDim.x) {
    int i,j;
    // j = k / stride, i = k % stride (divisor == upcast stride)
    stride_divmod(j, i, k);
    output_b[k] = input_b[i + func(j) * stride_divmod.divisor];
  }
}

// Gather elements along strided dimension of the tensor according to given indices.
// Host-side launcher: element counts and stride are in units of Element and
// must allow 128-bit vectorization (stride divisible by 128/sizeof_bits<Element>);
// the kernel then operates on uint128_t chunks.
template<typename Element, typename Func>
void
gather(Element const * input,
       Element * output,
       Func func,
       int batch_size,
       int num_elems_input,
       int num_elems_output,
       int stride,
       cutlass::KernelHardwareInfo const& hw_info)
{
  // Upcast to uint128_t data type
  int factor = 128 / cutlass::sizeof_bits<Element>::value;
  assert(stride % factor == 0);
  int stride_upcast = stride/factor;
  int num_elems_input_upcast = num_elems_input / factor;
  int num_elems_output_upcast = num_elems_output / factor;
  cutlass::FastDivmod stride_divmod(stride_upcast);
  // One block per SM in x; batches mapped to gridDim.z.
  dim3 blocks(hw_info.sm_count, 1, batch_size);
  gather_kernel<<<blocks, 1024>>>(reinterpret_cast<cute::uint128_t const *>(input),
                                  reinterpret_cast<cute::uint128_t *>(output),
                                  func,
                                  num_elems_input_upcast,
                                  num_elems_output_upcast,
                                  stride_divmod);
}

// Naive grid-stride loop implementation of scatter: the inverse of gather_kernel.
// Input row k/stride is written to output row func(k/stride).
template<typename Element, typename Func>
__global__ void
scatter_kernel(Element const * __restrict__ input,
               Element * __restrict__ output,
               Func func,
               int num_elems_input,
               int num_elems_output,
               cutlass::FastDivmod stride_divmod)
{
  Element const * input_b = input + blockIdx.z * num_elems_input;
  Element * output_b = output + blockIdx.z * num_elems_output;
  int tidx = threadIdx.x + blockIdx.x * blockDim.x;
  for (int k = tidx; k < num_elems_input; k += blockDim.x * gridDim.x) {
    int i,j;
    // j = k / stride, i = k % stride (divisor == upcast stride)
    stride_divmod(j, i, k);
    output_b[i + func(j) * stride_divmod.divisor] = input_b[k];
  }
}

// Scatter elements along strided dimension of the tensor according to given indices.
// (Comment fixed: this launcher drives scatter_kernel, not gather.)
// Same 128-bit vectorization requirements as gather() above.
template<typename Element, typename Func>
void
scatter(Element const * input,
        Element * output,
        Func func,
        int batch_size,
        int num_elems_input,
        int num_elems_output,
        int stride,
        cutlass::KernelHardwareInfo const& hw_info)
{
  // Upcast to uint128_t data type
  int factor = 128 / cutlass::sizeof_bits<Element>::value;
  assert(stride % factor == 0);
  int stride_upcast = stride/factor;
  int num_elems_input_upcast = num_elems_input / factor;
  int num_elems_output_upcast = num_elems_output / factor;
  cutlass::FastDivmod stride_divmod(stride_upcast);
  dim3 blocks(hw_info.sm_count, 1, batch_size);
  scatter_kernel<<<blocks, 1024>>>(reinterpret_cast<cute::uint128_t const *>(input),
                                   reinterpret_cast<cute::uint128_t *>(output),
                                   func,
                                   num_elems_input_upcast,
                                   num_elems_output_upcast,
                                   stride_divmod);
}

} // namespace example
| cutlass/examples/52_hopper_gather_scatter_fusion/gather_kernel.cuh/0 | {
"file_path": "cutlass/examples/52_hopper_gather_scatter_fusion/gather_kernel.cuh",
"repo_id": "cutlass",
"token_count": 2200
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "cutlass/util/helper_cuda.hpp"
template <class ProblemShape, class CtaTiler,
class TA, class AStride, class ASmemLayout, class TiledCopyA,
class TB, class BStride, class BSmemLayout, class TiledCopyB,
class TC, class CStride, class CSmemLayout, class TiledMma,
class Alpha, class Beta>
__global__ static
__launch_bounds__(decltype(size(TiledMma{}))::value)
void
gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler,
TA const* A, AStride dA, ASmemLayout sA_layout, TiledCopyA copy_a,
TB const* B, BStride dB, BSmemLayout sB_layout, TiledCopyB copy_b,
TC * C, CStride dC, CSmemLayout , TiledMma mma,
Alpha alpha, Beta beta)
{
using namespace cute;
// Preconditions
CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K)
CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K)
CUTE_STATIC_ASSERT_V(size(copy_a) == size(mma)); // NumThreads
CUTE_STATIC_ASSERT_V(size(copy_b) == size(mma)); // NumThreads
static_assert(is_static<ASmemLayout>::value);
static_assert(is_static<BSmemLayout>::value);
static_assert(is_static<CSmemLayout>::value);
CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK
CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK
CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN
//
// Full and Tiled Tensors
//
// Represent the full tensors
Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K)
Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K)
Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N)
// Get the appropriate blocks for this thread block
auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k)
Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N)
// Shared memory buffers
__shared__ TA smemA[cosize_v<ASmemLayout>];
__shared__ TB smemB[cosize_v<BSmemLayout>];
Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K)
Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K)
//
// Partition the copying of A and B tiles across the threads
//
// TUTORIAL: Example of partitioning via a TiledCopy
ThrCopy thr_copy_a = copy_a.get_slice(threadIdx.x);
Tensor tAgA = thr_copy_a.partition_S(gA); // (CPY,CPY_M,CPY_K,k)
Tensor tAsA = thr_copy_a.partition_D(sA); // (CPY,CPY_M,CPY_K)
Tensor tArA = make_fragment_like(tAsA); // (CPY,CPY_M,CPY_K)
ThrCopy thr_copy_b = copy_b.get_slice(threadIdx.x);
Tensor tBgB = thr_copy_b.partition_S(gB); // (CPY,CPY_N,CPY_K,k)
Tensor tBsB = thr_copy_b.partition_D(sB); // (CPY,CPY_N,CPY_K)
Tensor tBrB = make_fragment_like(tBsB); // (CPY,CPY_N,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // CPY_M
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tArA)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tAsA)); // CPY_K
CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tArA)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // CPY_N
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBrB)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBsB)); // CPY_K
CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBrB)); // CPY_K
// Copy gmem to rmem for k_tile=0
copy(copy_a, tAgA(_,_,_,0), tArA);
copy(copy_b, tBgB(_,_,_,0), tBrB);
//
// Define A/B partitioning and C accumulators
//
// TUTORIAL: Example of partitioning via a TiledMMA
ThrMMA thr_mma = mma.get_slice(threadIdx.x);
Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K)
Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K)
Tensor tCgC = thr_mma.partition_C(gC); // (MMA,MMA_M,MMA_N)
// Allocate registers for pipelining
Tensor tCrA = thr_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K)
// Allocate the accumulators -- same size as the projected data
Tensor tCrC = thr_mma.make_fragment_C(tCgC); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V( shape(tCrA) == shape(tCsA)); // (MMA,MMA_M,MMA_K)
CUTE_STATIC_ASSERT_V( shape(tCrB) == shape(tCsB)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V( shape(tCrC) == shape(tCgC)); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V(size<1>(tCgC) == size<1>(tCsA)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(tCgC) == size<1>(tCsB)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // MMA_K
// Clear the accumulators
clear(tCrC);
#if 0
if(thread0()) {
print(" mA : "); print( mA); print("\n");
print(" gA : "); print( gA); print("\n");
print(" sA : "); print( sA); print("\n");
print("tAgA : "); print(tAgA); print("\n");
print("tAsA : "); print(tAsA); print("\n");
print("tArA : "); print(tArA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mB : "); print( mB); print("\n");
print(" gB : "); print( gB); print("\n");
print(" sB : "); print( sB); print("\n");
print("tBgB : "); print(tBgB); print("\n");
print("tBsB : "); print(tBsB); print("\n");
print("tArA : "); print(tArA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mC : "); print( mC); print("\n");
print(" gC : "); print( gC); print("\n");
print("tCsA : "); print(tCsA); print("\n");
print("tCsB : "); print(tCsB); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print("tCrC : "); print(tCrC); print("\n");
}
#endif
#if 1
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
//
// PIPELINED MAIN LOOP
// TUTORIAL: Example of a gemm loop that pipelines shared memory AND register memory
// Data is read from global to registers, then to shared via the tA|tB partitions
// Data is then copied from shared to registers in multiple waves via the tC partitions
// and gemm(.) operates on the current register wave
//
// Load A, B shmem->regs for k_block=0
copy(tCsA(_,_,0), tCrA(_,_,0));
copy(tCsB(_,_,0), tCrB(_,_,0));
auto K_TILE_MAX = size<3>(tAgA);
auto K_BLOCK_MAX = size<2>(tCrA);
CUTE_NO_UNROLL
for (int k_tile = 0; k_tile < K_TILE_MAX; ++k_tile)
{
// Pipeline the k-mode of the block registers
CUTE_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
// Copy rmem to smem
__syncthreads();
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
}
// Copy smem to rmem for k_block+1
int k_block_next = (k_block + 1) % K_BLOCK_MAX;
copy(tCsA(_,_,k_block_next), tCrA(_,_,k_block_next));
copy(tCsB(_,_,k_block_next), tCrB(_,_,k_block_next));
if (k_block == 0)
{
// Copy gmem to rmem for k_tile+1
int k_tile_next = (k_tile + 1 < K_TILE_MAX) ? k_tile + 1 : k_tile;
copy(copy_a, tAgA(_,_,_,k_tile_next), tArA);
copy(copy_b, tBgB(_,_,_,k_tile_next), tBrB);
}
// Thread-level register gemm for k_block
gemm(mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC);
} // k_block
} // k_tile
#endif
//
// Epilogue
//
axpby(alpha, tCrC, beta, tCgC);
}
// Setup params for a NT GEMM
// A is (M,K) m-major (ldA >= m), B is (N,K) n-major (ldB >= n), C is (M,N)
// m-major. Builds the static CTA tile / smem / copy / MMA configuration and
// launches gemm_device on `stream`. Computes C = alpha * A * B^T + beta * C.
template <class TA, class TB, class TC,
          class Alpha, class Beta>
void
gemm_nt(int m, int n, int k,
        Alpha alpha,
        TA const* A, int ldA,
        TB const* B, int ldB,
        Beta beta,
        TC      * C, int ldC,
        cudaStream_t stream = 0)
{
  using namespace cute;
  // Define shapes (dynamic)
  auto M = int(m);
  auto N = int(n);
  auto K = int(k);
  auto prob_shape = make_shape(M, N, K);                     // (M, N, K)
  // Define NT strides (mixed): the unit (compile-time Int<1>) stride is the
  // contiguous mode; the leading dimension is a runtime stride.
  auto dA = make_stride(Int<1>{}, ldA);                      // (dM, dK)
  auto dB = make_stride(Int<1>{}, ldB);                      // (dN, dK)
  auto dC = make_stride(Int<1>{}, ldC);                      // (dM, dN)
  // Define CTA tile sizes (static)
  auto bM = Int<128>{};
  auto bN = Int<128>{};
  auto bK = Int<  8>{};
  auto cta_tiler = make_shape(bM, bN, bK);                   // (BLK_M, BLK_N, BLK_K)
  // Define the smem layouts (static); default layout is compact column-major
  auto sA = make_layout(make_shape(bM, bK));                 // (m,k) -> smem_idx; m-major
  auto sB = make_layout(make_shape(bN, bK));                 // (n,k) -> smem_idx; n-major
  auto sC = make_layout(make_shape(bM, bN));                 // (m,n) -> smem_idx; m-major
  // Define the thread layouts (static); 128-bit vectorized gmem loads,
  // 32x8 threads x 4x1 values covers the full 128x8 tile per copy.
  TiledCopy copyA = make_tiled_copy(Copy_Atom<UniversalCopy<uint128_t>, TA>{},
                                    Layout<Shape<_32,_8>>{}, // Thr layout 32x8 m-major
                                    Layout<Shape< _4,_1>>{});// Val layout  4x1 m-major
  TiledCopy copyB = make_tiled_copy(Copy_Atom<UniversalCopy<uint128_t>, TB>{},
                                    Layout<Shape<_32,_8>>{}, // Thr layout 32x8 n-major
                                    Layout<Shape< _4,_1>>{});// Val layout  4x1 n-major
  TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
                                 Layout<Shape<_16,_16,_1>>{});  // 16x16x1 TiledMMA
#if 0
  print(copyA);
  print(copyB);
  print(mmaC);
#endif
#if 0
  print_latex(copyA);
  print_latex(copyB);
  print_latex(mmaC);
#endif
  // One CTA per (bM x bN) tile of C; one thread per TiledMMA thread.
  dim3 dimBlock(size(mmaC));
  dim3 dimGrid(size(ceil_div(M, bM)),
               size(ceil_div(N, bN)));
  gemm_device<<<dimGrid, dimBlock, 0, stream>>>
      (prob_shape, cta_tiler,
       A, dA, sA, copyA,
       B, dB, sB, copyB,
       C, dC, sC, mmaC,
       alpha, beta);
}
// Setup params for a TN GEMM
// A is (M,K) k-major (ldA >= k), B is (N,K) k-major (ldB >= k), C is (M,N)
// m-major. Builds the static CTA tile / smem / copy / MMA configuration and
// launches gemm_device on `stream`. Computes C = alpha * A^T * B + beta * C.
template <class TA, class TB, class TC,
          class Alpha, class Beta>
void
gemm_tn(int m, int n, int k,
        Alpha alpha,
        TA const* A, int ldA,
        TB const* B, int ldB,
        Beta beta,
        TC      * C, int ldC,
        cudaStream_t stream = 0)
{
  using namespace cute;
  // Define shapes (dynamic)
  auto M = int(m);
  auto N = int(n);
  auto K = int(k);
  auto prob_shape = make_shape(M, N, K);                     // (M, N, K)
  // Define TN strides (mixed): here K is the contiguous (unit-stride) mode of
  // A and B, while C remains m-major.
  auto dA = make_stride(ldA, Int<1>{});                      // (dM, dK)
  auto dB = make_stride(ldB, Int<1>{});                      // (dN, dK)
  auto dC = make_stride(Int<1>{}, ldC);                      // (dM, dN)
  // Define CTA tile sizes (static)
  auto bM = Int<128>{};
  auto bN = Int<128>{};
  auto bK = Int<  8>{};
  auto cta_tiler = make_shape(bM, bN, bK);                   // (BLK_M, BLK_N, BLK_K)
  // Define the smem layouts (static). The leading extent is padded by one
  // (stride bM+1 / bN+1) — presumably to avoid smem bank conflicts when the
  // gmem loads are k-major but the MMA reads m-/n-major; TODO confirm.
  auto sA = make_layout(make_shape ( bM, bK),
                        make_stride(Int<1>{}, bM+Int<1>{})); // (m,k) -> smem_idx; padded m-major
  auto sB = make_layout(make_shape ( bN, bK),
                        make_stride(Int<1>{}, bN+Int<1>{})); // (n,k) -> smem_idx; padded n-major
  auto sC = make_layout(make_shape(bM, bN));                 // (m,n) -> smem_idx
  // Define the thread layouts (static); scalar (non-vectorized) copies since
  // gmem is k-major while smem is m-/n-major.
  TiledCopy copyA = make_tiled_copy(Copy_Atom<UniversalCopy<TA>, TA>{},
                                    Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
                                    Layout<Shape< _1,_1>>{});              // Val layout  1x1
  TiledCopy copyB = make_tiled_copy(Copy_Atom<UniversalCopy<TB>, TB>{},
                                    Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
                                    Layout<Shape< _1,_1>>{});              // Val layout  1x1
  TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
                                 Layout<Shape<_16,_16,_1>>{});  // 16x16x1 TiledMMA
#if 0
  print(copyA);
  print(copyB);
  print(mmaC);
#endif
#if 0
  print_latex(copyA);
  print_latex(copyB);
  print_latex(mmaC);
#endif
  // One CTA per (bM x bN) tile of C; one thread per TiledMMA thread.
  dim3 dimBlock(size(mmaC));
  dim3 dimGrid(size(ceil_div(M, bM)),
               size(ceil_div(N, bN)));
  gemm_device<<<dimGrid, dimBlock, 0, stream>>>
      (prob_shape, cta_tiler,
       A, dA, sA, copyA,
       B, dB, sB, copyB,
       C, dC, sC, mmaC,
       alpha, beta);
}
// Dispatch to the transpose-specific launcher based on (transA, transB).
// Only the NT ('N','T') and TN ('T','N') combinations are implemented by this
// example; any other combination is reported as an error.
template <class TA, class TB, class TC,
          class Alpha, class Beta>
void
gemm(char transA, char transB, int m, int n, int k,
     Alpha alpha,
     TA const* A, int ldA,
     TB const* B, int ldB,
     Beta beta,
     TC      * C, int ldC,
     cudaStream_t stream = 0)
{
  if (transA == 'N' && transB == 'T') {
    return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
  } else
  if (transA == 'T' && transB == 'N') {
    return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
  }
  // Report the unsupported combination explicitly: under NDEBUG the assert
  // below compiles to nothing and this function would otherwise return
  // silently without computing anything.
  std::cerr << "Error: gemm transA='" << transA << "' transB='" << transB
            << "' is not implemented" << std::endl;
  assert(false && "Not implemented");
}
// Driver: parses [m] [n] [k] [transA] [transB] from the command line,
// fills A/B with uniform random values in [-1,1), runs the CuTe GEMM once
// for correctness (result kept in cute_result) and then times it.
int main(int argc, char** argv)
{
  // Query device 0; bail out gracefully if no usable CUDA device is present.
  cudaDeviceProp props;
  cudaError_t error = cudaGetDeviceProperties(&props, 0);
  if (error != cudaSuccess) {
    std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
    return -1;
  }
  if (props.major < 7) {
    std::cout << "This example requires an Volta GPU or newer (CC >= 70)" << std::endl;
    // Return 0 so tests pass if run on unsupported architectures or CUDA Toolkits.
    return 0;
  }
  // Problem size and transpose flags (defaults: 5120 x 5120 x 4096, NT).
  int m = 5120;
  if (argc >= 2)
    sscanf(argv[1], "%d", &m);
  int n = 5120;
  if (argc >= 3)
    sscanf(argv[2], "%d", &n);
  int k = 4096;
  if (argc >= 4)
    sscanf(argv[3], "%d", &k);
  char transA = 'N';
  if (argc >= 5)
    sscanf(argv[4], "%c", &transA);
  char transB = 'T';
  if (argc >= 6)
    sscanf(argv[5], "%c", &transB);
  using TA = float;
  using TB = float;
  using TC = float;
  using TI = float;   // scalar (alpha/beta) type
  TI alpha = 1.0;
  TI beta  = 0.0;
  std::cout << "M = " << m << std::endl;
  std::cout << "N = " << n << std::endl;
  std::cout << "K = " << k << std::endl;
  std::cout << "C = A^" << transA << " B^" << transB << std::endl;
  // Host-side buffers: A is m*k, B is n*k, C is m*n; C primed with -1 so an
  // untouched output is visibly wrong.
  thrust::host_vector<TA> h_A(m*k);
  thrust::host_vector<TB> h_B(n*k);
  thrust::host_vector<TC> h_C(m*n);
  for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 );
  for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 );
  for (int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1);
  thrust::device_vector<TA> d_A = h_A;
  thrust::device_vector<TB> d_B = h_B;
  thrust::device_vector<TC> d_C = h_C;
  double gflops = (2.0*m*n*k) * 1e-9;
  const int timing_iterations = 100;
  GPU_Clock timer;
  // Leading dimensions implied by the transpose flags. Invalid flags are a
  // hard error: the previous assert(false) compiled away under NDEBUG and
  // would have launched the GEMM with ldA/ldB == 0.
  int ldA = 0, ldB = 0, ldC = m;
  if (transA == 'N') {
    ldA = m;
  } else if (transA == 'T') {
    ldA = k;
  } else {
    std::cerr << "Invalid transA: " << transA << std::endl;
    return -1;
  }
  if (transB == 'N') {
    ldB = k;
  } else if (transB == 'T') {
    ldB = n;
  } else {
    std::cerr << "Invalid transB: " << transB << std::endl;
    return -1;
  }
  // Run once (un-timed) to produce the reference result and warm up.
  d_C = h_C;
  gemm(transA, transB, m, n, k,
       alpha,
       d_A.data().get(), ldA,
       d_B.data().get(), ldB,
       beta,
       d_C.data().get(), ldC);
  CUTE_CHECK_LAST();
  thrust::host_vector<TC> cute_result = d_C;
  // Timing iterations
  timer.start();
  for (int i = 0; i < timing_iterations; ++i) {
    gemm(transA, transB, m, n, k,
         alpha,
         d_A.data().get(), ldA,
         d_B.data().get(), ldB,
         beta,
         d_C.data().get(), ldC);
  }
  double cute_time = timer.seconds() / timing_iterations;
  CUTE_CHECK_LAST();
  printf("CUTE_GEMM:     [%6.1f]GFlop/s  (%6.4f)ms\n", gflops / cute_time, cute_time*1000);
  return 0;
}
| cutlass/examples/cute/tutorial/sgemm_sm70.cu/0 | {
"file_path": "cutlass/examples/cute/tutorial/sgemm_sm70.cu",
"repo_id": "cutlass",
"token_count": 9373
} | 16 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/algorithm/functional.hpp>
#include <cute/tensor.hpp>
#include <cute/atom/mma_atom.hpp>
/** The gemm algorithm takes four (or three) tensors and computes
* D = A * B + C
* It dispatches based on the number of modes each tensor has:
*
* 1. `(V) x (V) => (V)`.
* The element-wise product of vectors. Dispatches to FMA or MMA.
* 2. `(M) x (N) => (M,N)`.
* The outer product of vectors. Dispatches to [3] with new mode K=(1).
* 3. `(M,K) x (N,K) => (M,N)`.
* The product of matrices. Dispatches to [5] with MMA vector-mode V.
* 4. `(V,M) x (V,N) => (V,M,N)`.
* The batched outer product of vectors. Accounts for register reuse and dispatches to [1] for each (m,n).
* 5. `(V,M,K) x (V,N,K) => (V,M,N)`.
* The batched product of matrices. Dispatches to [4] for each (k).
*/
namespace cute
{
//
// Three arguments to four
//
// gemm(C, A, B) computes C = A * B + C, i.e. the destination also serves as
// the accumulator. Both overloads forward to the four-argument form.
//
template <class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TA, ALayout> const& A,
     Tensor<TB, BLayout> const& B,
     Tensor<TC, CLayout>      & C)
{
  return gemm(C, A, B, C);
}
// Same as above, with an explicit MMA_Atom selecting the hardware MMA.
template <class MMA,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TA, ALayout> const& A,
     Tensor<TB, BLayout> const& B,
     Tensor<TC, CLayout>      & C)
{
  return gemm(mma, C, A, B, C);
}
//
// Accept mutable temporaries
//
// Rvalue-reference overloads so callers may pass a temporary view (e.g. a
// slice expression) as the destination; each simply rebinds the temporary to
// an lvalue by forwarding to the corresponding lvalue overload.
//
template <class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TA, ALayout> const& A,
     Tensor<TB, BLayout> const& B,
     Tensor<TC, CLayout>      && C)
{
  return gemm(C, A, B, C);
}
template <class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TD, DLayout>      && D,
     Tensor<TA, ALayout> const& A,
     Tensor<TB, BLayout> const& B,
     Tensor<TC, CLayout> const& C)
{
  return gemm(D, A, B, C);
}
template <class MMA,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TA, ALayout> const& A,
     Tensor<TB, BLayout> const& B,
     Tensor<TC, CLayout>      && C)
{
  return gemm(mma, C, A, B, C);
}
template <class MMA,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TD, DLayout>      && D,
     Tensor<TA, ALayout> const& A,
     Tensor<TB, BLayout> const& B,
     Tensor<TC, CLayout> const& C)
{
  return gemm(mma, D, A, B, C);
}
//
// Default MMA is UniversalFMA
//
// When no MMA_Atom is supplied, fall back to a scalar fused-multiply-add
// atom instantiated on the four tensors' value types.
//
template <class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TD, DLayout>      & D,
     Tensor<TA, ALayout> const& A,
     Tensor<TB, BLayout> const& B,
     Tensor<TC, CLayout> const& C)
{
  using MMA = MMA_Atom<UniversalFMA<typename Tensor<TD,DLayout>::value_type,
                                    typename Tensor<TA,ALayout>::value_type,
                                    typename Tensor<TB,BLayout>::value_type,
                                    typename Tensor<TC,CLayout>::value_type>>;
  return gemm(MMA{}, D, A, B, C);
}
//
// Thread-Local Register-Memory GEMMs
//
// Dispatch [1]: (V) x (V) => (V)
// Base case: all four tensors are rank-1 register fragments; hand the raw
// value vectors to the MMA_Atom, which validates their sizes itself.
template <class MMA,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout,
          __CUTE_REQUIRES(DLayout::rank == 1 && is_rmem<TD>::value &&
                          ALayout::rank == 1 && is_rmem<TA>::value &&
                          BLayout::rank == 1 && is_rmem<TB>::value &&
                          CLayout::rank == 1 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TD, DLayout>      & D,  // (V) Logical data
     Tensor<TA, ALayout> const& A,  // (V) Logical data
     Tensor<TB, BLayout> const& B,  // (V) Logical data
     Tensor<TC, CLayout> const& C)  // (V) Logical data
{
  // No static assertions on (V), MMA checks compatibility
  mma.call(D, A, B, C);
}
// Dispatch [2]: (M) x (N) => (M,N)
// Outer product of two vectors. Appends a unit K-mode to A and B and forwards
// to dispatch [3] (which sees (M,1) x (N,1) => (M,N)).
template <class MMA,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout,
          __CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
                          ALayout::rank == 1 && is_rmem<TA>::value &&
                          BLayout::rank == 1 && is_rmem<TB>::value &&
                          CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TD, DLayout>      & D,  // (M,N) Logical data
     Tensor<TA, ALayout> const& A,  // (M) Logical data
     Tensor<TB, BLayout> const& B,  // (N) Logical data
     Tensor<TC, CLayout> const& C)  // (M,N) Logical data
{
  CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C));  // AM == CM
  CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C));  // BN == CN
  CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
  gemm(mma,
       D,                                                  // (M,N)
       make_tensor(A.data(), append<2>(A.layout())),       // (M,1)
       make_tensor(B.data(), append<2>(B.layout())),       // (N,1)
       C);                                                 // (M,N)
}
// Dispatch [3]: (M,K) x (N,K) => (M,N)
// Matrix product without an explicit MMA value-mode. Only valid for 1-value
// MMAs (e.g. UniversalFMA); prepends a unit V-mode to every tensor and
// forwards to dispatch [5].
template <class MMA,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout,
          __CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
                          ALayout::rank == 2 && is_rmem<TA>::value &&
                          BLayout::rank == 2 && is_rmem<TB>::value &&
                          CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TD, DLayout>      & D,  // (M,N) Logical data
     Tensor<TA, ALayout> const& A,  // (M,K) Logical data
     Tensor<TB, BLayout> const& B,  // (N,K) Logical data
     Tensor<TC, CLayout> const& C)  // (M,N) Logical data
{
  CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C));  // AM == CM
  CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C));  // BN == CN
  CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(B));  // AK == BK
  CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
  // Assert this is a 1-value MMA
  CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutC_TV{}) == Int<1>{});
  CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutA_TV{}) == Int<1>{});
  CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutB_TV{}) == Int<1>{});
  gemm(mma,
       make_tensor(D.data(), prepend<3>(D.layout())),      // (1,M,N)
       make_tensor(A.data(), prepend<3>(A.layout())),      // (1,M,K)
       make_tensor(B.data(), prepend<3>(B.layout())),      // (1,N,K)
       make_tensor(C.data(), prepend<3>(C.layout())));     // (1,M,N)
}
// Dispatch [4]: (V,M) x (V,N) => (V,M,N)
// Batched outer product: one dispatch-[1] MMA per (m,n) cell. The (m,n)
// traversal order is specialized on the byte size of the per-cell A/B
// fragments (size<0> * sizeof(value_type)) to encourage the compiler's
// register-reuse (.reuse) optimizations: consecutive iterations revisit the
// operand that was just read, and the serpentine reversal avoids re-loading
// at the row/column turns.
template <class MMA,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout,
          __CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
                          ALayout::rank == 2 && is_rmem<TA>::value &&
                          BLayout::rank == 2 && is_rmem<TB>::value &&
                          CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TD, DLayout>      & D,  // (V,M,N) Logical data
     Tensor<TA, ALayout> const& A,  // (V,M) Logical data
     Tensor<TB, BLayout> const& B,  // (V,N) Logical data
     Tensor<TC, CLayout> const& C)  // (V,M,N) Logical data
{
  CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C));  // AM == CM
  CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C));  // BN == CN
  CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
  auto M = size<1>(A);
  auto N = size<1>(B);
  // REGISTER .reuse OPTIMIZATIONS
  // 64-bit traversal specialization -- serpentine path
  if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 8 &&
                decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 8)
  {
#if 1 // NOTE: Row- vs Col- major could depend on the C-matrix order... (which we can test)
    // Row-major serpentine iteration
    CUTE_UNROLL
    for (int m = 0; m < M; ++m) {
      CUTE_UNROLL
      for (int n = 0; n < N; ++n) {
        int ns = (m & 1) ? N-1-n : n;  // Serpentine coordinate
        gemm(mma, D(_,m,ns), A(_,m), B(_,ns), C(_,m,ns));
      }
    }
#else
    // Col-major serpentine iteration
    CUTE_UNROLL
    for (int n = 0; n < N; ++n) {
      CUTE_UNROLL
      for (int m = 0; m < M; ++m) {
        int ms = (n & 1) ? M-1-m : m;  // Serpentine coordinate
        gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
      }
    }
#endif
  } else
  // 32-bit traversal specialization -- kinked serpentine path
  if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 4 &&
                decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 4)
  {
#if 1 // NOTE: Row- vs Col- major could depend on the C-matrix order... (which we can test)
    // Row-major kinked serpentine iteration: advance m in pairs so both rows
    // of the pair reuse B(_,ns) back-to-back before n moves on.
    CUTE_UNROLL
    for (int m = 0; m < M; m += 2) {
      CUTE_UNROLL
      for (int n = 0; n < N; ++n) {
        int ns = (m & 2) ? N-1-n : n;
        gemm(mma, D(_,m+0,ns), A(_,m+0), B(_,ns), C(_,m+0,ns));
        if (m+1 < M) {
          gemm(mma, D(_,m+1,ns), A(_,m+1), B(_,ns), C(_,m+1,ns));
        }
      }
    }
#else
    // Col-major kinked serpentine iteration
    CUTE_UNROLL
    for (int n = 0; n < N; n += 2) {
      CUTE_UNROLL
      for (int m = 0; m < M; ++m) {
        // Kinked serpentine traversal for maximum register reuse
        int ms = (n & 2) ? M-1-m : m;
        gemm(mma, D(_,ms,n+0), A(_,ms), B(_,n+0), C(_,ms,n+0));
        if (n+1 < N) {
          gemm(mma, D(_,ms,n+1), A(_,ms), B(_,n+1), C(_,ms,n+1));
        }
      }
    }
#endif
  } else
  // 64-bit + 32-bit traversal order -- keep A (64-bit) in the outer loop and serpentine B
  if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 8 &&
                decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 4) {
    // Row-major serpentine iteration
    CUTE_UNROLL
    for (int m = 0; m < M; ++m) {
      CUTE_UNROLL
      for (int n = 0; n < N; ++n) {
        int ns = (m & 1) ? N-1-n : n;  // Serpentine coordinate
        gemm(mma, D(_,m,ns), A(_,m), B(_,ns), C(_,m,ns));
      }
    }
  } else
  // 32-bit + 64-bit traversal order -- keep B (64-bit) in the outer loop and serpentine A
  if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 4 &&
                decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 8) {
    // Col-major serpentine iteration
    CUTE_UNROLL
    for (int n = 0; n < N; ++n) {
      CUTE_UNROLL
      for (int m = 0; m < M; ++m) {
        int ms = (n & 1) ? M-1-m : m;  // Serpentine coordinate
        gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
      }
    }
  } else
  // Fallback to serpentine loop
  {
    // Col-major serpentine iteration
    CUTE_UNROLL
    for (int n = 0; n < N; ++n) {
      CUTE_UNROLL
      for (int m = 0; m < M; ++m) {
        int ms = (n & 1) ? M-1-m : m;  // Serpentine coordinate
        gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
      }
    }
  }
}
// Dispatch [5]: (V,M,K) x (V,N,K) => (V,M,N)
// Batched matrix product: accumulate into D with one dispatch-[4] batched
// outer product per k-slice.
template <class MMA,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout,
          __CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
                          ALayout::rank == 3 && is_rmem<TA>::value &&
                          BLayout::rank == 3 && is_rmem<TB>::value &&
                          CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TD, DLayout>      & D,  // (V,M,N) Logical data
     Tensor<TA, ALayout> const& A,  // (V,M,K) Logical data
     Tensor<TB, BLayout> const& B,  // (V,N,K) Logical data
     Tensor<TC, CLayout> const& C)  // (V,M,N) Logical data
{
  CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C));  // AM == CM
  CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C));  // BN == CN
  CUTE_STATIC_ASSERT_V(size<2>(A) == size<2>(B));  // AK == BK
  CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
  auto K = size<2>(A);
  CUTE_UNROLL
  for (int k = 0; k < K; ++k) {
    gemm(mma, D, A(_,_,k), B(_,_,k), C);
  }
}
//
// Thread-Local Shared-Memory GEMMs
//
// Same dispatch numbering as the register-memory overloads above, but with
// A and B resident in shared memory; only [3] and [5] have smem variants.
// Dispatch [1]: (V) x (V) => (V)
// Dispatch [2]: (M) x (N) => (M,N)
// Dispatch [3]: (M,K) x (N,K) => (M,N)
// Dispatch [4]: (V,M) x (V,N) => (V,M,N)
// Dispatch [5]: (V,M,K) x (V,N,K) => (V,M,N)
// Dispatch [3]: (M,K) x (N,K) => (M,N)
// Smem matrix product for 1-value MMAs: prepend a unit V-mode and forward to
// the smem dispatch [5] below.
template <class MMA,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout,
          __CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
                          ALayout::rank == 2 && is_smem<TA>::value &&
                          BLayout::rank == 2 && is_smem<TB>::value &&
                          CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TD, DLayout>      & D,  // (M,N) Logical data
     Tensor<TA, ALayout> const& A,  // (M,K) Logical data
     Tensor<TB, BLayout> const& B,  // (N,K) Logical data
     Tensor<TC, CLayout> const& C)  // (M,N) Logical data
{
  CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C));  // AM == CM
  CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C));  // BN == CN
  CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(B));  // AK == BK
  CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
  // Assert this is a 1-value MMA
  CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutC_TV{}) == Int<1>{});
  CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutA_TV{}) == Int<1>{});
  CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutB_TV{}) == Int<1>{});
  gemm(mma,
       make_tensor(D.data(), prepend<3>(D.layout())),      // (1,M,N)
       make_tensor(A.data(), prepend<3>(A.layout())),      // (1,M,K)
       make_tensor(B.data(), prepend<3>(B.layout())),      // (1,N,K)
       make_tensor(C.data(), prepend<3>(C.layout())));     // (1,M,N)
}
// Dispatch [5]: (V,M,K) x (V,N,K) => (V,M,N)
// Smem batched matrix product: allocates register fragments for A and B via
// the MMA_Atom, then per k-slice copies smem -> registers and runs the
// register-memory dispatch [4] on the freshly loaded fragments.
template <class MMA,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout,
          __CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
                          ALayout::rank == 3 && is_smem<TA>::value &&
                          BLayout::rank == 3 && is_smem<TB>::value &&
                          CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
     Tensor<TD, DLayout>      & D,  // (V,M,N) Logical data
     Tensor<TA, ALayout> const& A,  // (V,M,K) Logical data
     Tensor<TB, BLayout> const& B,  // (V,N,K) Logical data
     Tensor<TC, CLayout> const& C)  // (V,M,N) Logical data
{
  CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C));  // AM == CM
  CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C));  // BN == CN
  CUTE_STATIC_ASSERT_V(size<2>(A) == size<2>(B));  // AK == BK
  CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
  auto rA = MMA_Atom<MMA>::make_fragment_A(A);
  auto rB = MMA_Atom<MMA>::make_fragment_B(B);
  auto K = size<2>(A);
  CUTE_UNROLL
  for (int k = 0; k < K; ++k)
  {
    copy(A(_,_,k), rA(_,_,k));
    copy(B(_,_,k), rB(_,_,k));
    // Thread-level register gemm for k
    gemm(mma, D, rA(_,_,k), rB(_,_,k), C);
  }
}
} // end namespace cute
| cutlass/include/cute/algorithm/gemm.hpp/0 | {
"file_path": "cutlass/include/cute/algorithm/gemm.hpp",
"repo_id": "cutlass",
"token_count": 9055
} | 17 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/mma.hpp>
// Config
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))
# define CUTE_ARCH_MMA_SM75_SUPPORTED
# if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750))
# define CUTE_ARCH_MMA_SM75_ENABLED
# endif
#endif
namespace cute
{
//
// SM75 MMA 1688 F16F16F32
//
// SM75 (Turing) m16n8k8 Tensor Core MMA: D(f32) = A(f16) * B(f16) + C(f32),
// A row-major ("T"), B column-major ("N"). Operands are packed: each uint32_t
// register holds two f16 values.
struct SM75_16x8x8_F32F16F16F32_TN
{
  using DRegisters = float[4];       // 4 x f32 accumulator outputs per thread
  using ARegisters = uint32_t[2];    // 2 x (f16x2) A fragments per thread
  using BRegisters = uint32_t[1];    // 1 x (f16x2) B fragment per thread
  using CRegisters = float[4];       // 4 x f32 accumulator inputs per thread
  // Register asm fma
  CUTE_HOST_DEVICE static void
  fma(float         & d0, float         & d1, float         & d2, float         & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      float    const& c0, float    const& c1, float    const& c2, float    const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM75_ENABLED)
    asm volatile("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32"
                 "{%0, %1, %2, %3},"
                 "{%4, %5},"
                 "{%6},"
                 "{%7, %8, %9, %10};\n"
        : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
        :  "r"(a0),  "r"(a1),
           "r"(b0),
           "f"(c0),  "f"(c1),  "f"(c2),  "f"(c3));
#else
    // Guard: this PTX requires compute capability >= 75 at compile time.
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM75_16x8x8_F32F16F16F32_TN without CUTE_ARCH_MMA_SM75_ENABLED");
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// SM75 MMA 8816 S8S8S32
//
// SM75 (Turing) m8n8k16 integer Tensor Core MMA: D(s32) = A(s8) * B(s8) + C(s32),
// A row-major ("T"), B column-major ("N"). Each uint32_t operand register
// packs four s8 values; accumulators are carried as uint32_t register images
// of s32 values.
struct SM75_8x8x16_S32S8S8S32_TN
{
  using DRegisters = uint32_t[2];    // 2 x s32 accumulator outputs per thread
  using ARegisters = uint32_t[1];    // 1 x (s8x4) A fragment per thread
  using BRegisters = uint32_t[1];    // 1 x (s8x4) B fragment per thread
  using CRegisters = uint32_t[2];    // 2 x s32 accumulator inputs per thread
  // Register asm fma
  CUTE_HOST_DEVICE static void
  fma(uint32_t      & d0, uint32_t      & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM75_ENABLED)
    asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32"
                 "{%0, %1},"
                 "{%2},"
                 "{%3},"
                 "{%4, %5};\n"
        : "=r"(d0), "=r"(d1)
        :  "r"(a0),
           "r"(b0),
           "r"(c0),  "r"(c1));
#else
    // Guard: this PTX requires compute capability >= 75 at compile time.
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM75_8x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM75_ENABLED");
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cute
| cutlass/include/cute/arch/mma_sm75.hpp/0 | {
"file_path": "cutlass/include/cute/arch/mma_sm75.hpp",
"repo_id": "cutlass",
"token_count": 1728
} | 18 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/mma.hpp>
#include <cute/tensor.hpp>
namespace cute
{
namespace detail {
// Detection idiom: supports_output_scaling<Traits>::value is true iff the
// traits type declares an `accumulate_` member (output-scale support, as
// used by traits such as GMMA). Primary template: no such member.
template <class X, class = void>
struct supports_output_scaling { static constexpr bool value = false; };
// Partial specialization selected via void_t/SFINAE when X has `accumulate_`.
template <class X>
struct supports_output_scaling<X, void_t<decltype(declval<X>().accumulate_)>> { static constexpr bool value = true; };
} // end namespace detail
/**
 * concept MMA_Traits
 * {
 *   using ValTypeD =  // Logical D-value type
 *   using ValTypeA =  // Logical A-value type
 *   using ValTypeB =  // Logical B-value type
 *   using ValTypeC =  // Logical C-value type (NOTE: Not used? Assumed == ValTypeD)
 *
 *   using FrgTypeA =  // A-type consumed by MMA (if omitted, same as ValTypeA)
 *   using FrgTypeB =  // B-type consumed by MMA (if omitted, same as ValTypeB)
 *   using FrgTypeC =  // C-type consumed by MMA (if omitted, same as ValTypeC)
 *
 *   using Shape_MNK =  // Logical MxNxK shape of the MMA
 *
 *   using ThrID =      // Logical thread id (tid) -> tidx
 *
 *   using ALayout =    // (Logical thread id (tid), Logical value id (vid)) -> Flat MK-coord
 *   using BLayout =    // (Logical thread id (tid), Logical value id (vid)) -> Flat NK-coord
 *   using CLayout =    // (Logical thread id (tid), Logical value id (vid)) -> Flat MN-coord
 * };
 */
// Primary template: instantiating this means no MMA_Traits specialization
// exists for the given MMA operation, so fail with a readable diagnostic.
// (sizeof(T) == 0 is never true, but makes the assert dependent on the
// template parameter so it only fires on instantiation.)
template <class MMAOperation, class... MMAOpArgs>
struct MMA_Traits
{
  static_assert(sizeof(MMAOperation) == 0, "MMA_Traits not implemented for this MMA_Operation.");
};
// Traits for the scalar fallback MMA: a single-thread 1x1x1 fused
// multiply-add. Each (tid,vid) layout is the trivial single-element layout.
template <class D, class A, class B, class C>
struct MMA_Traits<UniversalFMA<D,A,B,C>>
{
  using ValTypeD = D;
  using ValTypeA = A;
  using ValTypeB = B;
  using ValTypeC = C;
  // Logical shape of the MMA
  using Shape_MNK = Shape<_1,_1,_1>;
  // Logical thread id (tid) -> tidx
  using ThrID = Layout<_1>;
  // (Logical thread id (tid), Logical value id (vid)) -> coord
  // (tid,vid) -> (m,k)
  using ALayout = Layout<Shape<_1,_1>>;
  // (tid,vid) -> (n,k)
  using BLayout = Layout<Shape<_1,_1>>;
  // (tid,vid) -> (m,n)
  using CLayout = Layout<Shape<_1,_1>>;
};
//
// Generic mma_unpack for any MMA_Traits
//
// Reinterprets the register-resident A/B/C/D tensors as the raw register
// types declared by the MMA operation and dispatches to MMA_Op::fma via
// detail::explode, which expands each register array into individual
// arguments.
template <class MMA_Op, class... MMA_Args,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE constexpr
void
mma_unpack(MMA_Traits<MMA_Op, MMA_Args...> const& traits,
           Tensor<TD, DLayout>      & D,
           Tensor<TA, ALayout> const& A,
           Tensor<TB, BLayout> const& B,
           Tensor<TC, CLayout> const& C)
{
  static_assert(is_rmem<TD>::value, "Expected registers in MMA_Atom::call");
  static_assert(is_rmem<TA>::value, "Expected registers in MMA_Atom::call");
  static_assert(is_rmem<TB>::value, "Expected registers in MMA_Atom::call");
  static_assert(is_rmem<TC>::value, "Expected registers in MMA_Atom::call");
  // Register value types from the MMA_Operation register arrays
  using RegTypeD = typename remove_extent<typename MMA_Op::DRegisters>::type;
  using RegTypeA = typename remove_extent<typename MMA_Op::ARegisters>::type;
  using RegTypeB = typename remove_extent<typename MMA_Op::BRegisters>::type;
  using RegTypeC = typename remove_extent<typename MMA_Op::CRegisters>::type;
  using MMATraits = MMA_Traits<MMA_Op, MMA_Args...>;
  // Number of registers of each operand expected by the op.
  [[maybe_unused]] constexpr int RegNumD = extent<typename MMA_Op::DRegisters>::value;
  constexpr int RegNumA = extent<typename MMA_Op::ARegisters>::value;
  constexpr int RegNumB = extent<typename MMA_Op::BRegisters>::value;
  constexpr int RegNumC = extent<typename MMA_Op::CRegisters>::value;
  Tensor rA = recast<RegTypeA>(A);
  Tensor rB = recast<RegTypeB>(B);
  CUTE_STATIC_ASSERT_V(size(rA) == Int<RegNumA>{});
  CUTE_STATIC_ASSERT_V(size(rB) == Int<RegNumB>{});
  if constexpr (is_same<RegTypeD, void>::value)
  {
    // DRegisters == void: the op accumulates in place, so C and D must be
    // the same tensor; unpack only A, B, and the mutable C(==D) operand.
    static_assert(is_same<typename TD::value_type, typename TC::value_type>::value, "GMMA C and D value_type must match.");
    static_assert(is_same<DLayout, CLayout>::value, "GMMA C and D layouts must match.");
    // assert((void*)&C == (void*)&D);
    Tensor rC = recast<RegTypeC>(D);  // NOTE: D and C are same, so use mutable D
    //CUTE_STATIC_ASSERT_V(size(rC) == Int<RegNumC>{});
    if constexpr (detail::supports_output_scaling<MMATraits>::value) {
      // Pass the traits' accumulate_ flag as a trailing pointer argument.
      detail::explode(MMA_Op::fma,
                      rA, make_int_sequence<RegNumA>{},
                      rB, make_int_sequence<RegNumB>{},
                      rC, make_int_sequence<RegNumC>{},
                      &(traits.accumulate_), seq<0>{});
    }
    else {
      detail::explode(MMA_Op::fma,
                      rA, make_int_sequence<RegNumA>{},
                      rB, make_int_sequence<RegNumB>{},
                      rC, make_int_sequence<RegNumC>{});
    }
  }
  else {
    // Distinct D registers: unpack all four operands.
    Tensor rD = recast<RegTypeD>(D);
    Tensor rC = recast<RegTypeC>(C);
    CUTE_STATIC_ASSERT_V(size(rD) == Int<RegNumD>{});
    CUTE_STATIC_ASSERT_V(size(rC) == Int<RegNumC>{});
    if constexpr (detail::supports_output_scaling<MMATraits>::value) {
      detail::explode(MMA_Op::fma,
                      rD, make_int_sequence<RegNumD>{},
                      rA, make_int_sequence<RegNumA>{},
                      rB, make_int_sequence<RegNumB>{},
                      rC, make_int_sequence<RegNumC>{},
                      &(traits.accumulate_), seq<0>{});
    }
    else {
      detail::explode(MMA_Op::fma,
                      rD, make_int_sequence<RegNumD>{},
                      rA, make_int_sequence<RegNumA>{},
                      rB, make_int_sequence<RegNumB>{},
                      rC, make_int_sequence<RegNumC>{});
    }
  }
}
//
// Accept mutable temporaries
//
// Overload so callers may pass an rvalue D (e.g. a sliced view); simply
// forwards to the lvalue overload above.
template <class MMA_Op, class... MMA_Args,
          class TD, class DLayout,
          class TA, class ALayout,
          class TB, class BLayout,
          class TC, class CLayout>
CUTE_HOST_DEVICE constexpr
void
mma_unpack(MMA_Traits<MMA_Op, MMA_Args...> const& traits,
           Tensor<TD, DLayout>      && D,
           Tensor<TA, ALayout> const&  A,
           Tensor<TB, BLayout> const&  B,
           Tensor<TC, CLayout> const&  C)
{
  mma_unpack(traits, D, A, B, C);
}
namespace detail {
// FrgType{A,B,C}_or_Default: yield Traits::FrgType{A,B,C} when declared,
// otherwise fall back to the corresponding ValType (see the MMA_Traits
// concept comment above: FrgTypes are optional).
template <class X, class = void>
struct FrgTypeA_or_Default { using type = typename X::ValTypeA; };
template <class X>
struct FrgTypeA_or_Default<X,void_t<typename X::FrgTypeA>> { using type = typename X::FrgTypeA; };
template <class X, class = void>
struct FrgTypeB_or_Default { using type = typename X::ValTypeB; };
template <class X>
struct FrgTypeB_or_Default<X,void_t<typename X::FrgTypeB>> { using type = typename X::FrgTypeB; };
template <class X, class = void>
struct FrgTypeC_or_Default { using type = typename X::ValTypeC; };
template <class X>
struct FrgTypeC_or_Default<X,void_t<typename X::FrgTypeC>> { using type = typename X::FrgTypeC; };
} // end namespace detail
} // namespace cute
| cutlass/include/cute/atom/mma_traits.hpp/0 | {
"file_path": "cutlass/include/cute/atom/mma_traits.hpp",
"repo_id": "cutlass",
"token_count": 3561
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/tuple.hpp>
#include <cute/container/array.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
#include <cute/numeric/integral_constant.hpp>
/** IntTuple is an integer or a tuple of IntTuples.
* This file holds utilities for working with IntTuples,
* but does not hold a concrete concept or class of IntTuple.
*/
namespace cute
{
// Implementation of get<0>(Integral).
// Even though is_tuple<Integral> is false and tuple_size<Integral> doesn't compile,
// CuTe defines rank(Integral) as 1, so it's useful for get<0>(Integral) to return its input
template <size_t I, class T, __CUTE_REQUIRES(cute::is_integral<cute::remove_cvref_t<T>>::value)>
CUTE_HOST_DEVICE constexpr
decltype(auto)
get(T&& t) noexcept
{
  static_assert(I == 0, "Index out of range");
  return static_cast<T&&>(t);
}
// Custom recursive get for anything that implements get<I>(.) (for a single integer I).
template <size_t I0, size_t I1, size_t... Is, class T>
CUTE_HOST_DEVICE constexpr
decltype(auto)
get(T&& t) noexcept
{
  // Peel off the first index and recurse into the selected sub-element.
  return get<I1, Is...>(get<I0>(static_cast<T&&>(t)));
}
//
// rank
//
// rank(t): number of top-level modes of an IntTuple (Int<1> for an integer).
// rank<Is...>(t): rank of the sub-tuple selected by indices Is...
template <int... Is, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
rank(IntTuple const& t)
{
  if constexpr (sizeof...(Is) == 0) {
    if constexpr (is_tuple<IntTuple>::value) {
      return Int<tuple_size<IntTuple>::value>{};
    } else {
      return Int<1>{};
    }
  } else {
    return rank(get<Is...>(t));
  }
  CUTE_GCC_UNREACHABLE;
}
// Static rank as a type and as an integral value.
template <class IntTuple>
using rank_t = decltype(rank(declval<IntTuple>()));
template <class IntTuple>
static constexpr int rank_v = rank_t<IntTuple>::value;
//
// shape
//
// shape(s): the shape profile of an IntTuple (identity on integers).
template <class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
shape(IntTuple const& s)
{
  if constexpr (is_tuple<IntTuple>::value) {
    return transform(s, [](auto const& a) { return shape(a); });
  } else {
    return s;
  }
  CUTE_GCC_UNREACHABLE;
}
// shape<I,Is...>(s): shape of the sub-mode selected by indices I,Is...
template <int I, int... Is, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
shape(IntTuple const& s)
{
  if constexpr (is_tuple<IntTuple>::value) {
    return shape<Is...>(get<I>(s));
  } else {
    return get<I,Is...>(shape(s));
  }
  CUTE_GCC_UNREACHABLE;
}
//
// max
//
// Maximum over all leaves of all arguments (tuples are flattened recursively).
template <class T0, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
max(T0 const& t0, Ts const&... ts)
{
  if constexpr (is_tuple<T0>::value) {
    return cute::max(cute::apply(t0, [](auto const&... a){ return cute::max(a...); }), ts...);
  } else if constexpr (sizeof...(Ts) == 0) {
    return t0;
  } else {
    return cute::max(t0, cute::max(ts...));
  }
  CUTE_GCC_UNREACHABLE;
}
//
// min
//
// Minimum over all leaves of all arguments (tuples are flattened recursively).
template <class T0, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
min(T0 const& t0, Ts const&... ts)
{
  if constexpr (is_tuple<T0>::value) {
    return cute::min(cute::apply(t0, [](auto const&... a){ return cute::min(a...); }), ts...);
  } else if constexpr (sizeof...(Ts) == 0) {
    return t0;
  } else {
    return cute::min(t0, cute::min(ts...));
  }
  CUTE_GCC_UNREACHABLE;
}
//
// gcd
//
// Greatest common divisor over all leaves of all arguments.
template <class T0, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
gcd(T0 const& t0, Ts const&... ts)
{
  if constexpr (is_tuple<T0>::value) {
    return cute::gcd(cute::apply(t0, [](auto const&... a){ return cute::gcd(a...); }), ts...);
  } else if constexpr (sizeof...(Ts) == 0) {
    return t0;
  } else {
    return cute::gcd(t0, cute::gcd(ts...));
  }
  CUTE_GCC_UNREACHABLE;
}
//
// depth
//
// depth(t): nesting depth of an IntTuple; an integer has depth 0,
// a flat tuple of integers depth 1, and so on.
// depth<Is...>(t): depth of the selected sub-tuple.
template <int... Is, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
depth(IntTuple const& t)
{
  if constexpr (sizeof...(Is) == 0) {
    if constexpr (is_tuple<IntTuple>::value) {
      return Int<1>{} + cute::apply(t, [](auto const&... v){ return cute::max(depth(v)...); });
    } else {
      return Int<0>{};
    }
  } else {
    return depth(get<Is...>(t));
  }
  CUTE_GCC_UNREACHABLE;
}
// Static depth as a type and as an integral value.
template <class Tuple>
using depth_t = decltype(depth(declval<Tuple>()));
template <class Tuple>
static constexpr int depth_v = depth_t<Tuple>::value;
//
// product
//
// Implementation of product as a function object
// Recursively multiplies all leaves of an IntTuple; the empty tuple
// yields Int<1>{} (the multiplicative identity).
struct Product
{
  template <class IntTuple>
  CUTE_HOST_DEVICE constexpr
  auto
  operator()(IntTuple const& a) const
  {
    if constexpr (is_tuple<IntTuple>::value) {
      if constexpr (tuple_size<IntTuple>::value == 0) {
        return Int<1>{};
      } else {
        return cute::transform_apply(a, Product{}, multiplies_unary_lfold{});
      }
    } else if constexpr (cute::is_integral<IntTuple>::value) {
      return a;
    }
    CUTE_GCC_UNREACHABLE;
  }
};
// Callable product function object
CUTE_INLINE_CONSTANT Product product;
// Return a rank(t) tuple @a result such that get<i>(@a result) = product(get<i>(@a t))
template <class Tuple>
CUTE_HOST_DEVICE constexpr
auto
product_each(Tuple const& t)
{
  // wrap() ensures a scalar input is treated as a rank-1 tuple.
  return transform(wrap(t), product);
}
// Take the product of Tuple at the leaves of TupleG.
// The guide tuple only supplies the profile at which the corresponding
// sub-tuples of @a tuple are collapsed to products; its leaf values are
// never read, so the lambda's guide parameter is left unnamed to avoid
// an unused-parameter warning.
template <class Tuple, class TupleG>
CUTE_HOST_DEVICE constexpr
auto
product_like(Tuple const& tuple, TupleG const& guide)
{
  return transform_leaf(guide, tuple, [](auto const&, auto const& t) { return product(t); });
}
// Return the product of elements in a mode
// size(a): total number of elements implied by shape a.
// size<Is...>(a): size of the selected sub-mode.
template <int... Is, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
size(IntTuple const& a)
{
  if constexpr (sizeof...(Is) == 0) {
    return product(a);
  } else {
    return size(get<Is...>(a));
  }
  CUTE_GCC_UNREACHABLE;
}
// Static size as an integral value (requires a static IntTuple).
template <class IntTuple>
static constexpr int size_v = decltype(size(declval<IntTuple>()))::value;
//
// sum
//
// Sum over all leaves of an IntTuple (identity on integers).
template <class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
sum(IntTuple const& a)
{
  if constexpr (is_tuple<IntTuple>::value) {
    return cute::apply(a, [](auto const&... v){ return (Int<0>{} + ... + sum(v)); });
  } else {
    return a;
  }
  CUTE_GCC_UNREACHABLE;
}
//
// inner_product
//
// Elementwise product of two congruent IntTuples, summed over all leaves.
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
inner_product(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
    static_assert(tuple_size<IntTupleA>::value == tuple_size<IntTupleB>::value, "Mismatched ranks");
    return transform_apply(a, b, [](auto const& x, auto const& y) { return inner_product(x,y); },
                                 [](auto const&... v) { return (Int<0>{} + ... + v); });
  } else {
    return a * b;
  }
  CUTE_GCC_UNREACHABLE;
}
//
// ceil_div
//
// Ceiling division generalized to IntTuples.
//   tuple/tuple : elementwise (missing modes of b are implicitly 1)
//   tuple/int   : fold the divisor across the modes of a, left to right
//   int/tuple   : divide by the total product of b
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
ceil_div(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value) {
    if constexpr (is_tuple<IntTupleB>::value) {  // tuple tuple
      static_assert(tuple_size<IntTupleA>::value >= tuple_size<IntTupleB>::value, "Mismatched ranks");
      constexpr int R = tuple_size<IntTupleA>::value;        // Missing ranks in TupleB are implicitly 1
      return transform(a, append<R>(b,Int<1>{}), [](auto const& x, auto const& y) { return ceil_div(x,y); });
    } else {  // tuple int
      // Fold state: (result-so-far, remaining divisor). Each mode consumes
      // as much of the divisor as it can; the rest carries to the next mode.
      auto const [result, rest] = fold(a, cute::make_tuple(cute::make_tuple(), b),
        [] (auto const& init, auto const& ai) {
          return cute::make_tuple(append(get<0>(init), ceil_div(ai, get<1>(init))), ceil_div(get<1>(init), ai));
        });
      return result;
    }
  } else
  if constexpr (is_tuple<IntTupleB>::value) {    // int tuple
    return ceil_div(a, product(b));
  } else {
    return (a + b - Int<1>{}) / b;
  }
  CUTE_GCC_UNREACHABLE;
}
//
// round_up
// Round @a a up to the nearest multiple of @a b.
// For negative numbers, rounds away from zero.
//
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
round_up(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
    static_assert(tuple_size<IntTupleA>::value >= tuple_size<IntTupleB>::value, "Mismatched ranks");
    constexpr int R = tuple_size<IntTupleA>::value;          // Missing ranks in TupleB are implicitly 1
    return transform(a, append<R>(b,Int<1>{}), [](auto const& x, auto const& y) { return round_up(x,y); });
  } else {
    return ((a + b - Int<1>{}) / b) * b;
  }
  CUTE_GCC_UNREACHABLE;
}
/** Division for Shapes
 * Case Tuple Tuple:
 *   Perform shape_div element-wise
 * Case Tuple Int:
 *   Fold the division of b across each element of a
 *   Example: shape_div((4,5,6),40) -> shape_div((1,5,6),10) -> shape_div((1,1,6),2) -> (1,1,3)
 * Case Int Tuple:
 *   Return shape_div(a, product(b))
 * Case Int Int:
 *   Enforce the divisibility condition a % b == 0 || b % a == 0 when possible
 *   Return a / b with rounding away from 0 (that is, 1 or -1 when a < b)
 */
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
shape_div(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value) {
    if constexpr (is_tuple<IntTupleB>::value) {  // tuple tuple
      static_assert(tuple_size<IntTupleA>::value == tuple_size<IntTupleB>::value, "Mismatched ranks");
      return transform(a, b, [](auto const& x, auto const& y) { return shape_div(x,y); });
    } else {  // tuple int
      // Same fold structure as ceil_div: thread the remaining divisor
      // through the modes of a.
      auto const [result, rest] = fold(a, cute::make_tuple(cute::make_tuple(), b),
        [] (auto const& init, auto const& ai) {
          return cute::make_tuple(append(get<0>(init), shape_div(ai, get<1>(init))), shape_div(get<1>(init), ai));
        });
      return result;
    }
  } else
  if constexpr (is_tuple<IntTupleB>::value) {    // int tuple
    return shape_div(a, product(b));
  } else
  if constexpr (is_static<IntTupleA>::value && is_static<IntTupleB>::value) {
    // Static case: divisibility is checked at compile time.
    static_assert(IntTupleA::value % IntTupleB::value == 0 || IntTupleB::value % IntTupleA::value == 0, "Static shape_div failure");
    return C<shape_div(IntTupleA::value, IntTupleB::value)>{};
  } else {                                       // int int
    //assert(a % b == 0 || b % a == 0);          // Waive dynamic assertion
    return a / b != 0 ? a / b : signum(a) * signum(b);  // Division with rounding away from zero
  }
  CUTE_GCC_UNREACHABLE;
}
/** Minimum for Shapes
 * Only the scalar case is implemented; _1 compares less than any other
 * shape and is preserved as a static value.
 */
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
shape_min(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value || is_tuple<IntTupleB>::value) {
    static_assert(dependent_false<IntTupleA>, "Not implemented.");
  } else
  if constexpr (is_constant<1, IntTupleA>::value || is_constant<1, IntTupleB>::value) {
    return Int<1>{};  // _1 is less than all other shapes, preserve static
  } else {
    return cute::min(a, b);
  }
  CUTE_GCC_UNREACHABLE;
}
/** Return a tuple the same profile as A scaled by corresponding elements in B
 */
template <class A, class B>
CUTE_HOST_DEVICE constexpr
auto
elem_scale(A const& a, B const& b)
{
  if constexpr (is_tuple<A>::value) {
    return transform(a, b, [](auto const& x, auto const& y) { return elem_scale(x,y); });
  } else {
    // Leaf of A scales by the total product of the corresponding mode of B.
    return a * product(b);
  }
  CUTE_GCC_UNREACHABLE;
}
/** Test if two IntTuple have the same profile (hierarchical rank division)
 * Implemented by replacing every leaf with _0 and comparing the resulting types.
 */
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
congruent(IntTupleA const& a, IntTupleB const& b)
{
  return bool_constant<is_same<decltype(repeat_like(shape(a),_0{})),
                               decltype(repeat_like(shape(b),_0{}))>::value>{};
}
template <class A, class B>
using is_congruent = decltype(congruent(declval<A>(), declval<B>()));
/** Test if two IntTuple have the similar profiles up to Shape A (hierarchical rank division)
 * weakly_congruent is a partial order on A and B: A <= B
 * An integer leaf of A matches any sub-shape of B.
 */
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
weakly_congruent(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
    if constexpr (tuple_size<IntTupleA>::value != tuple_size<IntTupleB>::value) {
      return false_type{};
    } else {
      return transform_apply(a, b, [](auto const& x, auto const& y) { return weakly_congruent(x,y); },
                                   [](auto const&... z) { return (true_type{} && ... && z); });
    }
  } else if constexpr (is_integral<IntTupleA>::value) {
    return true_type{};
  } else if constexpr (is_integral<IntTupleB>::value) {
    return false_type{};
  } else {
    return weakly_congruent(shape(a), shape(b));
  }
  CUTE_GCC_UNREACHABLE;
}
template <class A, class B>
using is_weakly_congruent = decltype(weakly_congruent(declval<A>(), declval<B>()));
/** Test if Shape A is compatible with Shape B:
 *    the size of A and B are the same, and
 *    any coordinate into A can also be used as a coordinate into B
 * compatible is a partial order on A and B: A <= B
 */
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
compatible(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
    if constexpr (tuple_size<IntTupleA>::value != tuple_size<IntTupleB>::value) {
      return false_type{};
    } else {
      return transform_apply(a, b, [](auto const& x, auto const& y) { return compatible(x,y); },
                                   [](auto const&... z) { return (true_type{} && ... && z); });
    }
  } else if constexpr (is_integral<IntTupleA>::value) {
    // Integer leaf of A must account for the entire size of B's sub-shape.
    return a == size(b);
  } else if constexpr (is_integral<IntTupleB>::value) {
    return false_type{};
  } else {
    return compatible(shape(a), shape(b));
  }
  CUTE_GCC_UNREACHABLE;
}
template <class A, class B>
using is_compatible = decltype(compatible(declval<A>(), declval<B>()));
/** Test if Shape A is weakly compatible with Shape B:
 *    there exists a Shape C congruent to A such that compatible(elem_scale(A,C), B)
 * weakly_compatible is a partial order on A and B: A <= B
 */
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
weakly_compatible(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
    if constexpr (tuple_size<IntTupleA>::value != tuple_size<IntTupleB>::value) {
      return false_type{};
    } else {
      return transform_apply(a, b, [](auto const& x, auto const& y) { return weakly_compatible(x,y); },
                                   [](auto const&... z) { return (true_type{} && ... && z); });
    }
  } else if constexpr (is_integral<IntTupleA>::value) {
    // Integer leaf of A must divide the size of B's sub-shape.
    return size(b) % a == Int<0>{};
  } else if constexpr (is_integral<IntTupleB>::value) {
    return false_type{};
  } else {
    return weakly_compatible(shape(a), shape(b));
  }
  CUTE_GCC_UNREACHABLE;
}
template <class A, class B>
using is_weakly_compatible = decltype(weakly_compatible(declval<A>(), declval<B>()));
/** Replace the elements of Tuple B that are paired with an Int<0> with an Int<1>
 */
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
filter_zeros(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value) {
    return transform(a, b, [](auto const& x, auto const& y) { return filter_zeros(x,y); });
  } else if constexpr (is_constant<0, IntTupleA>::value) {
    return Int<1>{};
  } else {
    return b;
  }
  CUTE_GCC_UNREACHABLE;
}
// Convenience overload: filter a tuple against itself.
template <class Tuple>
CUTE_HOST_DEVICE constexpr
auto
filter_zeros(Tuple const& t)
{
  return filter_zeros(t, t);
}
//
// Converters and constructors with arrays and params
//
/** Make an IntTuple of rank N from an Indexable array.
 * Access elements up to a dynamic index n, then use init (requires compatible types)
 * Consider cute::take<B,E> if all indexing is known to be valid
 * \code
 *   std::vector<int> a = {6,3,4};
 *   auto tup = make_int_tuple<5>(a, a.size(), 0)  // (6,3,4,0,0)
 * \endcode
 */
template <int N, class Indexable, class T>
CUTE_HOST_DEVICE constexpr
auto
make_int_tuple(Indexable const& t, int n, T const& init)
{
  static_assert(N > 0);
  if constexpr (N == 1) {
    return 0 < n ? t[0] : init;
  } else {
    return transform(make_seq<N>{}, [&](auto i) { return i < n ? t[i] : init; });
  }
  CUTE_GCC_UNREACHABLE;
}
/** Fill the dynamic values of a Tuple with values from another Tuple
 * \code
 *   auto params = make_tuple(6,3,4);
 *   cute::tuple<Int<1>, cute::tuple<int, int, Int<3>>, int, Int<2>> result;
 *   fill_int_tuple_from(result, params);  // (_1,(6,3,_3),4,_2)
 * \endcode
 */
template <class Tuple, class TupleV>
CUTE_HOST_DEVICE constexpr
auto
fill_int_tuple_from(Tuple& result, TupleV const& vals)
{
  // Fold state is the list of not-yet-consumed values; each dynamic leaf
  // of `result` consumes the front of that list.
  return fold(result, vals, [](auto const& init, auto&& r) {
    if constexpr (is_static<remove_cvref_t<decltype(r)>>::value) {          // Skip static elements of result
      return init;
    } else if constexpr (is_tuple<remove_cvref_t<decltype(r)>>::value) {    // Recurse into tuples
      return fill_int_tuple_from(r, init);
    } else {                                                                // Assign and consume arg
      static_assert(tuple_size<remove_cvref_t<decltype(init)>>::value > 0, "Not enough values to fill with!");
      r = get<0>(init);
      return remove<0>(init);
    }
    CUTE_GCC_UNREACHABLE;
  });
}
/** Make a "Tuple" by filling in the dynamic values in order from the arguments
 * \code
 *   using result_t = cute::tuple<Int<1>, cute::tuple<int, int, Int<3>>, int, Int<2>>;
 *   auto result = make_int_tuple_from<result_t>(6,3,4);  // (_1,(6,3,_3),4,_2)
 * \endcode
 */
template <class Tuple, class... Ts>
CUTE_HOST_DEVICE constexpr
Tuple
make_int_tuple_from(Ts const&... ts)
{
  Tuple result = Tuple{};
  fill_int_tuple_from(result, cute::make_tuple(ts...));
  return result;
}
/** Convert a tuple to a flat homogeneous array of type T
 * \code
 *   auto tup = cute::make_tuple(Int<1>{}, cute::make_tuple(6,3,Int<3>{}),4,Int<2>{});
 *   cute::array<uint64_t,6> result = to_array<uint64_t>(tup);  // [1,6,3,3,4,2]
 * \endcode
 */
template <class T = int64_t, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
to_array(IntTuple const& t)
{
  auto flat_t = flatten_to_tuple(t);
  constexpr int N = tuple_size<decltype(flat_t)>::value;
  cute::array<T,N> result;
  for_each(make_seq<N>{}, [&] (auto i) { result[i] = get<i>(flat_t); });
  return result;
}
//
// Comparison operators
//
//
// There are many ways to compare tuple of elements and because CuTe is built
// on parameterizing layouts of coordinates, some comparisons are appropriate
// only in certain cases.
// -- lexicographical comparison [reverse, reflected, revref] : Correct for coords in RowMajor Layout
// -- colexicographical comparison [reverse, reflected, revref] : Correct for coords in ColMajor Layout
// -- element-wise comparison [any,all] :
// This can be very confusing. To avoid errors in selecting the appropriate
// comparison, op<|op<=|op>|op>= are *not* implemented for cute::tuple.
//
// When actually desiring to order coordinates, the user should map them to
// their indices within the Layout they came from:
// e.g. layoutX(coordA) < layoutX(coordB)
// That said, we implement the three most common ways to compare tuples below.
// These are implemented with slightly more explicit names than op<.
//
// Forward declarations of the three tuple comparison orders; see the
// discussion above for when each is appropriate.
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
lex_less(IntTupleA const& a, IntTupleB const& b);
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
colex_less(IntTupleA const& a, IntTupleB const& b);
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
elem_less(IntTupleA const& a, IntTupleB const& b);
namespace detail {
// Recursive worker for lex_less: compares mode I, then recurses on I+1
// when the modes compare equal. A shorter TupleA is "less".
template <size_t I, class TupleA, class TupleB>
CUTE_HOST_DEVICE constexpr
auto
lex_less_impl(TupleA const& a, TupleB const& b)
{
  if constexpr (I == tuple_size<TupleB>::value) {
    return cute::false_type{};     // Terminal: TupleB is exhausted
  } else if constexpr (I == tuple_size<TupleA>::value) {
    return cute::true_type{};      // Terminal: TupleA is exhausted, TupleB is not exhausted
  } else {
    return lex_less(get<I>(a), get<I>(b)) || (get<I>(a) == get<I>(b) && lex_less_impl<I+1>(a,b));
  }
  CUTE_GCC_UNREACHABLE;
}
// Recursive worker for colex_less: same scheme as lex_less_impl, but
// walks the modes from last to first.
template <size_t I, class TupleA, class TupleB>
CUTE_HOST_DEVICE constexpr
auto
colex_less_impl(TupleA const& a, TupleB const& b)
{
  if constexpr (I == tuple_size<TupleB>::value) {
    return cute::false_type{};     // Terminal: TupleB is exhausted
  } else if constexpr (I == tuple_size<TupleA>::value) {
    return cute::true_type{};      // Terminal: TupleA is exhausted, TupleB is not exhausted
  } else {
    constexpr size_t A = tuple_size<TupleA>::value - 1 - I;
    constexpr size_t B = tuple_size<TupleB>::value - 1 - I;
    return colex_less(get<A>(a), get<B>(b)) || (get<A>(a) == get<B>(b) && colex_less_impl<I+1>(a,b));
  }
  CUTE_GCC_UNREACHABLE;
}
// Recursive worker for elem_less: all modes must compare less.
template <size_t I, class TupleA, class TupleB>
CUTE_HOST_DEVICE constexpr
auto
elem_less_impl(TupleA const& a, TupleB const& b)
{
  if constexpr (I == tuple_size<TupleA>::value) {
    return cute::true_type{};      // Terminal: TupleA is exhausted
  } else if constexpr (I == tuple_size<TupleB>::value) {
    return cute::false_type{};     // Terminal: TupleA is not exhausted, TupleB is exhausted
  } else {
    return elem_less(get<I>(a), get<I>(b)) && elem_less_impl<I+1>(a,b);
  }
  CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// Lexicographical comparison
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
lex_less(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
    return detail::lex_less_impl<0>(a, b);
  } else {
    return a < b;
  }
  CUTE_GCC_UNREACHABLE;
}
// lex_leq/lex_gtr/lex_geq are derived from lex_less in the usual way.
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
lex_leq(T const& t, U const& u) {
  return !lex_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
lex_gtr(T const& t, U const& u) {
  return lex_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
lex_geq(T const& t, U const& u) {
  return !lex_less(t, u);
}
// Colexicographical comparison
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
colex_less(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
    return detail::colex_less_impl<0>(a, b);
  } else {
    return a < b;
  }
  CUTE_GCC_UNREACHABLE;
}
// colex_leq/colex_gtr/colex_geq are derived from colex_less.
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
colex_leq(T const& t, U const& u) {
  return !colex_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
colex_gtr(T const& t, U const& u) {
  return colex_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
colex_geq(T const& t, U const& u) {
  return !colex_less(t, u);
}
// Elementwise [all] comparison
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
elem_less(IntTupleA const& a, IntTupleB const& b)
{
  if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
    return detail::elem_less_impl<0>(a, b);
  } else {
    return a < b;
  }
  CUTE_GCC_UNREACHABLE;
}
// elem_leq/elem_gtr/elem_geq are derived from elem_less.
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
elem_leq(T const& t, U const& u) {
  return !elem_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
elem_gtr(T const& t, U const& u) {
  return elem_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
elem_geq(T const& t, U const& u) {
  return !elem_less(t, u);
}
namespace detail {
/** Increment a (dynamic) coord lexicographically within a shape
* @pre is_congruent<Coord,Shape>::value
* \code
* auto shape = make_shape(1,2,make_shape(2,3),3);
*
* int i = 0;
* for (auto coord = repeat_like(shape, 0); back(coord) != back(shape); increment(coord, shape)) {
* std::cout << i++ << ": " << coord << std::endl;
* }
* assert(i == size(shape));
* \endcode
*/
template <int I = 0, class Coord, class Shape>
CUTE_HOST_DEVICE constexpr
void
increment(Coord& coord, Shape const& shape)
{
if constexpr (is_integral<Coord>::value) {
++coord;
} else {
increment(get<I>(coord), get<I>(shape));
if constexpr (I+1 < tuple_size<Coord>::value) {
if (back(get<I>(coord)) == back(get<I>(shape))) {
back(get<I>(coord)) = 0;
increment<I+1>(coord, shape);
}
}
}
}
} // end namespace detail
struct ForwardCoordIteratorSentinal
{};
// A forward iterator for a starting coordinate in a shape's domain, and a shape.
// The starting coordinate may be zero but need not necessarily be.
template <class Coord, class Shape>
struct ForwardCoordIterator
{
static_assert(is_congruent<Coord, Shape>::value);
CUTE_HOST_DEVICE constexpr
Coord const& operator*() const { return coord; }
CUTE_HOST_DEVICE constexpr
ForwardCoordIterator& operator++() { detail::increment(coord, shape); return *this; }
// Sentinel for the end of the implied range
CUTE_HOST_DEVICE constexpr
bool operator< (ForwardCoordIteratorSentinal const&) const { return back(coord) < back(shape); }
CUTE_HOST_DEVICE constexpr
bool operator==(ForwardCoordIteratorSentinal const&) const { return back(coord) == back(shape); }
CUTE_HOST_DEVICE constexpr
bool operator!=(ForwardCoordIteratorSentinal const&) const { return back(coord) != back(shape); }
// NOTE: These are expensive, avoid use
CUTE_HOST_DEVICE constexpr
bool operator< (ForwardCoordIterator const& other) const { return colex_less(coord, other.coord); }
CUTE_HOST_DEVICE constexpr
bool operator==(ForwardCoordIterator const& other) const { return coord == other.coord; }
CUTE_HOST_DEVICE constexpr
bool operator!=(ForwardCoordIterator const& other) const { return coord != other.coord; }
Coord coord;
Shape const& shape;
};
// Construct a forward coordinate iterator over `shape`'s domain that begins
// at the provided coordinate `coord`.
template <class Shape, class Coord>
CUTE_HOST_DEVICE constexpr
auto
make_coord_iterator(Coord const& coord, Shape const& shape)
{
  using Iterator = ForwardCoordIterator<Coord,Shape>;
  return Iterator{coord, shape};
}
// Construct a forward coordinate iterator over `shape`'s domain that begins
// at the zero coordinate (shape-congruent tuple of int 0s).
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_coord_iterator(Shape const& shape)
{
  return make_coord_iterator(repeat_like(shape, int(0)), shape);
}
} // end namespace cute
| cutlass/include/cute/int_tuple.hpp/0 | {
"file_path": "cutlass/include/cute/int_tuple.hpp",
"repo_id": "cutlass",
"token_count": 11080
} | 20 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/int_tuple.hpp>
namespace cute
{
/** crd2idx(c,s,d) maps a coordinate within <Shape,Stride> to an index
 *
 * This is computed as follows:
 *  [coord, shape, and stride are all integers => step forward by stride]
 *    op(c, s, d)             => c * d
 *  [coord is integer, shape and stride are tuple => divmod coord for each mode]
 *    op(c, (s,S), (d,D))     => op(c % prod(s), s, d) + op(c / prod(s), (S), (D))
 *  [coord, shape, and stride are all tuples => consider each mode independently]
 *    op((c,C), (s,S), (d,D)) => op(c, s, d) + op((C), (S), (D))
 */
// Forward declaration; the definition follows the detail:: helpers below.
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
crd2idx(Coord  const& coord,
        Shape  const& shape,
        Stride const& stride);
namespace detail {
// Tuple/tuple/tuple case: map each mode independently with crd2idx and sum
// the resulting per-mode indices via a fold over the index sequence.
template <class Coord, class Shape, class Stride, int... Is>
CUTE_HOST_DEVICE constexpr
auto
crd2idx_ttt(Coord  const& coord,
            Shape  const& shape,
            Stride const& stride, seq<Is...>)
{
  return (... + crd2idx(get<Is>(coord), get<Is>(shape), get<Is>(stride)));
}
// Integer coord with tuple shape/stride: peel one mode per recursion step,
// splitting the linear coordinate by divmod with the mode's total size.
template <class CInt, class STuple, class DTuple, int I0, int... Is>
CUTE_HOST_DEVICE constexpr
auto
crd2idx_itt(CInt   const& coord,
            STuple const& shape,
            DTuple const& stride, seq<I0,Is...>)
{
  if constexpr (sizeof...(Is) == 0) {  // Avoid recursion and mod on single/last iter
    return crd2idx(coord, get<I0>(shape), get<I0>(stride));
  } else if constexpr (is_constant<0, CInt>::value) {
    // Statically-zero coordinate: no divmod required. Each mode still
    // contributes its zero-coordinate index so static arithmetic folds.
    return crd2idx(_0{}, get<I0>(shape), get<I0>(stride))
         + (_0{} + ... + crd2idx(_0{}, get<Is>(shape), get<Is>(stride)));
  } else {  // General case: mode I0 gets coord % size, the rest get coord / size
    return crd2idx(coord % product(get<I0>(shape)), get<I0>(shape), get<I0>(stride))
         + crd2idx_itt(coord / product(get<I0>(shape)), shape, stride, seq<Is...>{});
  }

  CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// Definition of crd2idx(c,s,d); dispatches on the tuple-ness of the arguments
// per the operational rules documented at the forward declaration above.
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
crd2idx(Coord  const& coord,
        Shape  const& shape,
        Stride const& stride)
{
  if constexpr (is_tuple<Coord>::value) {
    if constexpr (is_tuple<Shape>::value) {      // tuple tuple tuple
      static_assert(tuple_size<Coord>::value == tuple_size< Shape>::value, "Mismatched Ranks");
      static_assert(tuple_size<Coord>::value == tuple_size<Stride>::value, "Mismatched Ranks");
      return detail::crd2idx_ttt(coord, shape, stride, tuple_seq<Coord>{});
    } else {                                     // tuple "int" "int" -- ill-formed
      static_assert(sizeof(Coord) == 0, "Invalid parameters");  // always fails when instantiated
    }
  } else {
    if constexpr (is_tuple<Shape>::value) {      // "int" tuple tuple
      static_assert(tuple_size<Shape>::value == tuple_size<Stride>::value, "Mismatched Ranks");
      return detail::crd2idx_itt(coord, shape, stride, tuple_seq<Shape>{});
    } else {                                     // "int" "int" "int" -- base case
      return coord * stride;
    }
  }

  CUTE_GCC_UNREACHABLE;
}
namespace detail {
// Horner-style accumulation over a flat coordinate and flat shape:
//   i = c0 + s0 * (c1 + s1 * (c2 + ...))
template <class CTuple, class STuple, int I0, int... Is>
CUTE_HOST_DEVICE constexpr
auto
crd2idx_horner(CTuple const& coord,
               STuple const& shape, seq<I0,Is...>)
{
  if constexpr (sizeof...(Is) == 0) {  // No recursion on single/last iter
    return get<I0>(coord);
  } else {                             // General case
    return get<I0>(coord) + get<I0>(shape) * crd2idx_horner(coord, shape, seq<Is...>{});
  }

  CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
/** crd2idx(c,s) maps a coordinate within Shape to an index
 * via a colexicographical enumeration of coordinates in Shape.
 * i = c0 + s0 * (c1 + s1 * (c2 + s2 * ...))
 */
template <class Coord, class Shape>
CUTE_HOST_DEVICE constexpr
auto
crd2idx(Coord const& coord,
        Shape const& shape)
{
  if constexpr (is_integral<Coord>::value) {   // Coord is already an index
    return coord;
  } else if constexpr (is_integral<Shape>::value) {
    // Tuple coordinate with scalar shape is not meaningful.
    static_assert(dependent_false<Shape>, "Invalid parameters");
  } else {                                     // Make congruent, flatten, and apply Horner's method
    static_assert(tuple_size<Coord>::value == tuple_size<Shape>::value, "Mismatched Ranks");
    auto flat_coord = flatten(coord);
    // product_like reshapes `shape` to be congruent with `coord` before flattening.
    auto flat_shape = flatten(product_like(shape, coord));
    return detail::crd2idx_horner(flat_coord, flat_shape, tuple_seq<decltype(flat_shape)>{});
  }

  CUTE_GCC_UNREACHABLE;
}
/** idx2crd(i,s,d) splits an index into a coordinate within <Shape,Stride>.
 *
 * This is computed as follows:
 *  [index, shape, and stride are all integers => determine 1D coord]
 *    op(i, s, d)             => (i / d) % s
 *  [index is integer, shape and stride are tuple => determine component for each mode]
 *    op(i, (s,S), (d,D))     => (op(i, s, d), op(i, S, D)...)
 *  [index, shape, and stride are all tuples => consider each mode independently]
 *    op((i,I), (s,S), (d,D)) => (op(i, s, d), op((I), (S), (D)))
 *
 * NOTE: This only works for compact shape+stride layouts. A more general version would
 *       apply to all surjective layouts
 */
template <class Index, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
idx2crd(Index  const& idx,
        Shape  const& shape,
        Stride const& stride)
{
  if constexpr (is_tuple<Index>::value) {
    if constexpr (is_tuple<Shape>::value) {      // tuple tuple tuple
      static_assert(tuple_size<Index>::value == tuple_size< Shape>::value, "Mismatched Ranks");
      static_assert(tuple_size<Index>::value == tuple_size<Stride>::value, "Mismatched Ranks");
      return transform(idx, shape, stride, [](auto const& i, auto const& s, auto const& d){ return idx2crd(i,s,d); });
    } else {                                     // tuple "int" "int" -- ill-formed
      static_assert(sizeof(Index) == 0, "Invalid parameters");
    }
  } else {
    if constexpr (is_tuple<Shape>::value) {
      if constexpr (is_tuple<Stride>::value) {   // "int" tuple tuple
        static_assert(tuple_size<Shape>::value == tuple_size<Stride>::value, "Mismatched Ranks");
        return transform(shape, stride, [&](auto const& s, auto const& d){ return idx2crd(idx,s,d); });
      } else {                                   // "int" tuple "int"
        // Scalar stride with tuple shape: expand to compact column-major
        // strides of `shape` scaled by `stride`, then recurse per mode.
        return transform(shape, compact_col_major(shape, stride), [&](auto const& s, auto const& d){ return idx2crd(idx,s,d); });
      }
    } else {                                     // "int" "int" "int"
      if constexpr (is_constant<1, Shape>::value) {
        // Skip potential stride-0 division
        return Int<0>{};
      } else {
        return (idx / stride) % shape;
      }
    }
  }

  CUTE_GCC_UNREACHABLE;
}
/** idx2crd(i,s) splits an index into a coordinate within Shape
 * via a colexicographical enumeration of coordinates in Shape.
 * c0 = (idx / 1) % s0
 * c1 = (idx / s0) % s1
 * c2 = (idx / (s0 * s1)) % s2
 * ...
 */
template <class Index, class Shape>
CUTE_HOST_DEVICE constexpr
auto
idx2crd(Index const& idx,
        Shape const& shape)
{
  if constexpr (is_tuple<Index>::value) {
    if constexpr (is_tuple<Shape>::value) {  // tuple tuple
      static_assert(tuple_size<Index>::value == tuple_size<Shape>::value, "Mismatched Ranks");
      return transform(idx, shape, [](auto const& i, auto const& s) { return idx2crd(i,s); });
    } else {                                 // tuple "int" -- ill-formed
      static_assert(sizeof(Index) == 0, "Invalid parameters");
    }
  } else {
    if constexpr (is_tuple<Shape>::value) {  // "int" tuple
      // Use the compact column-major strides of `shape` as the divisors.
      return idx2crd(idx, shape, compact_col_major(shape));
    } else {                                 // "int" "int" -- identity
      return idx;
    }
  }

  CUTE_GCC_UNREACHABLE;
}
//
// crd2crd
//

// Re-express `coord`, a coordinate in src_shape's domain, as the corresponding
// coordinate in dst_shape's domain. Congruent tuples are mapped per mode;
// otherwise the mapping goes through the colexicographic linear index.
template <class Coord, class SShape, class DShape>
CUTE_HOST_DEVICE constexpr
auto
crd2crd(Coord  const& coord,
        SShape const& src_shape,
        DShape const& dst_shape)
{
  if constexpr (is_tuple<Coord>::value && is_tuple<SShape>::value && is_tuple<DShape>::value) {
    static_assert(tuple_size<Coord>::value == tuple_size<SShape>::value, "Mismatched Ranks");
    static_assert(tuple_size<Coord>::value == tuple_size<DShape>::value, "Mismatched Ranks");
    return transform(coord, src_shape, dst_shape, [](auto const& c, auto const& s, auto const& d) { return crd2crd(c,s,d); });
  } else {
    // assert(size(src_shape) == size(dst_shape))
    return idx2crd(crd2idx(coord, src_shape), dst_shape);
  }

  CUTE_GCC_UNREACHABLE;
}
//
// Compact Major
//

// Tags for common layouts and dispatching (defined further below).
struct LayoutLeft;               // Col-major layout mapping; leftmost extent has stride 1
using GenColMajor = LayoutLeft;  // Alias
struct LayoutRight;              // Row-major layout mapping; rightmost extent has stride 1
using GenRowMajor = LayoutRight; // Alias
namespace detail {
// For GCC8.5 -- Use of lambdas in unevaluated contexts. Instead use function objects.
// Specializations below supply the fold step (append vs prepend) and fold
// order (forward vs reverse) for each major.
template <class Major>
struct CompactLambda;
// @pre is_integral<Current>
// Return (result, current * product(shape)) to enable recurrence:
// `result` is the compact stride tuple for `shape` starting at stride
// `current`; the second element seeds the next mode's starting stride.
template <class Major, class Shape, class Current>
CUTE_HOST_DEVICE constexpr
auto
compact(Shape   const& shape,
        Current const& current)
{
  if constexpr (is_tuple<Shape>::value) {  // Shape::tuple Current::int
    using Lambda = CompactLambda<Major>;                  // Append or Prepend
    using Seq    = typename Lambda::template seq<Shape>;  // Seq or RSeq
    return cute::detail::fold(shape, cute::make_tuple(cute::make_tuple(), current), Lambda{}, Seq{});
  } else {                                 // Shape::int Current::int
    if constexpr (is_constant<1, Shape>::value) {
      // Size-1 modes get static stride 0 rather than propagating `current`.
      return cute::make_tuple(Int<0>{}, current);  // If current is dynamic, this could save a reg
    } else {
      return cute::make_tuple(current, current * shape);
    }
  }

  CUTE_GCC_UNREACHABLE;
}
// For GCC8.5 -- Specialization LayoutLeft
// Column-major fold step: iterate modes left-to-right and append each mode's
// stride, so the leftmost mode receives the smallest stride.
template <>
struct CompactLambda<LayoutLeft>
{
  template <class Init, class Shape>
  CUTE_HOST_DEVICE constexpr auto
  operator()(Init const& init, Shape const& si) {
    auto result = detail::compact<LayoutLeft>(si, get<1>(init));
    return cute::make_tuple(append(get<0>(init), get<0>(result)), get<1>(result));  // Append
  }

  template <class Shape>
  using seq = tuple_seq<Shape>;  // Seq
};
// For GCC8.5 -- Specialization LayoutRight
// Row-major fold step: iterate modes right-to-left and prepend each mode's
// stride, so the rightmost mode receives the smallest stride.
template <>
struct CompactLambda<LayoutRight>
{
  template <class Init, class Shape>
  CUTE_HOST_DEVICE constexpr auto
  operator()(Init const& init, Shape const& si) {
    auto result = detail::compact<LayoutRight>(si, get<1>(init));
    return cute::make_tuple(prepend(get<0>(init), get<0>(result)), get<1>(result));  // Prepend
  }

  template <class Shape>
  using seq = tuple_rseq<Shape>;  // RSeq
};
} // end namespace detail
// Compute the compact stride for `shape` under ordering tag `Major`
// (LayoutLeft or LayoutRight), starting at stride `current`.
template <class Major, class Shape, class Current = Int<1>,
          __CUTE_REQUIRES(is_tuple<Shape>::value || is_integral<Shape>::value)>
CUTE_HOST_DEVICE constexpr
auto
compact_major(Shape   const& shape,
              Current const& current = {})
{
  if constexpr (is_tuple<Current>::value) {  // Shape::tuple Current::tuple
    static_assert(is_tuple<Shape>::value, "Invalid parameters");
    static_assert(tuple_size<Shape>::value == tuple_size<Current>::value, "Mismatched Ranks");
    // Recurse to apply to the terminals of current
    return transform(shape, current, [&](auto const& s, auto const& c){ return compact_major<Major>(s,c); });
  } else {
    // Keep only the stride tuple; drop the running-stride bookkeeping.
    return get<0>(detail::compact<Major>(shape, current));
  }

  CUTE_GCC_UNREACHABLE;
}
//
// Compact Col Major
//

// Definition of the LayoutLeft tag declared above. Apply yields the compact
// column-major stride type for a given Shape.
struct LayoutLeft {
  template <class Shape>
  using Apply = decltype(compact_major<LayoutLeft>(declval<Shape>()));
};
// Compute a compact column-major stride for `shape`, beginning at `current`
// (defaults to static 1). Convenience wrapper over compact_major.
template <class Shape, class Current = Int<1>>
CUTE_HOST_DEVICE constexpr
auto
compact_col_major(Shape   const& shape,
                  Current const& current = {})
{
  using Major = LayoutLeft;
  return compact_major<Major>(shape, current);
}
//
// Compact Row Major
//

// Definition of the LayoutRight tag declared above. Apply yields the compact
// row-major stride type for a given Shape.
struct LayoutRight {
  template <class Shape>
  using Apply = decltype(compact_major<LayoutRight>(declval<Shape>()));
};
// Compute a compact row-major stride for `shape`, beginning at `current`
// (defaults to static 1). Convenience wrapper over compact_major.
template <class Shape, class Current = Int<1>>
CUTE_HOST_DEVICE constexpr
auto
compact_row_major(Shape   const& shape,
                  Current const& current = {})
{
  using Major = LayoutRight;
  return compact_major<Major>(shape, current);
}
//
// Compact Order -- compute a compact stride based on an ordering of the modes
//
namespace detail {
// @pre weakly_congruent(order, shape)
// @pre is_congruent<RefShape, RefOrder>
// @pre is_static<Order>
// @pre is_static<RefOrder>
// For each leaf mode, seed a column-major stride with the product of all
// reference-shape extents whose order value is smaller than this mode's.
template <class Shape, class Order, class RefShape, class RefOrder>
CUTE_HOST_DEVICE constexpr
auto
compact_order(Shape const& shape, Order const& order,
              RefShape const& ref_shape, RefOrder const& ref_order)
{
  if constexpr (is_tuple<Order>::value) {
    static_assert(tuple_size<Shape>::value == tuple_size<Order>::value, "Need equal rank of shape and order");
    return transform(shape, order, [&](auto const& s, auto const& o) { return compact_order(s, o, ref_shape, ref_order); });
  } else {
    // Compute the starting stride for this shape by accumulating all shapes corresponding to lesser orders
    auto stride_start = product(transform(ref_shape, ref_order,
                                          [&](auto const& s, auto const& o) {
                                            return conditional_return(o < order, s, Int<1>{});
                                          }));
    return compact_col_major(shape, stride_start);
  }

  CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// Compute a compact stride whose mode ordering follows `order`: the mode with
// the smallest order value receives stride 1, the next smallest the next
// stride, etc. Dynamic order entries are treated as larger than every static
// entry (in left-to-right sequence among themselves).
template <class Shape, class Order>
CUTE_HOST_DEVICE constexpr
auto
compact_order(Shape const& shape, Order const& order)
{
  // Flatten a shape made congruent with `order` to serve as the reference.
  auto ref_shape = flatten_to_tuple(product_like(shape, order));

  auto flat_order = flatten_to_tuple(order);
  // Find the largest static element of order
  auto max_order = cute::fold(flat_order, Int<0>{}, [](auto v, auto order) {
    if constexpr (is_constant<true, decltype(v < order)>::value) {
      return order;
    } else {
      return v;
    }

    CUTE_GCC_UNREACHABLE;
  });
  // Replace any dynamic elements within order with large-static elements
  // (values above max_order, so they sort after all static orders).
  auto max_seq = make_range<max_order+1, max_order+1+rank(flat_order)>{};
  auto ref_order = cute::transform(max_seq, flat_order, [](auto seq_v, auto order) {
    if constexpr (is_static<decltype(order)>::value) {
      return order;
    } else {
      return seq_v;
    }

    CUTE_GCC_UNREACHABLE;
  });

  auto new_order = unflatten(ref_order, order);

  return detail::compact_order(shape, new_order, ref_shape, ref_order);
}
// Overload: a GenColMajor order tag is exactly compact column-major.
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
compact_order(Shape const& shape, GenColMajor const& major)
{
  return compact_major<LayoutLeft>(shape);
}
// Overload: a GenRowMajor order tag is exactly compact row-major.
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
compact_order(Shape const& shape, GenRowMajor const& major)
{
  return compact_major<LayoutRight>(shape);
}
} // end namespace cute
| cutlass/include/cute/stride.hpp/0 | {
"file_path": "cutlass/include/cute/stride.hpp",
"repo_id": "cutlass",
"token_count": 6621
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/arch.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the operation implied by MMA.
struct OpMultiplyAdd {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the result is saturated to MAX_FLOAT|MIN_FLOAT or MAX_INT|MIN_INT
struct OpMultiplyAddSaturate {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the input is converted to a narrower type (BF16)
struct OpMultiplyAddFastBF16 {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the input is converted to a narrower type (F16)
struct OpMultiplyAddFastF16 {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the input data types are mixed and the narrower type is
/// upcasted to the wider type
struct OpMultiplyAddMixedInputUpcast {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the input is converted to 2 (big and small) TF32 components
//  Perform 3xTF32 or 4xTF32 for every F32 output element
struct OpMultiplyAddFastF32 {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the input is converted to 2 (big and small) TF32 components
//  Perform 3xTF32 or 4xTF32 for every complex<F32> output element
struct OpMultiplyAddComplexFastF32 {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating that staged accumulation is not to be used. This is valid only for SM89
/// FP8 kernels.
// NOTE(review): declared but not defined in this header, unlike the other
// tags -- presumably defined/used elsewhere; confirm before instantiating.
struct OpMultiplyAddFastAccum;

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the complex multiply-add operation
struct OpMultiplyAddComplex {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the gaussian complex multiply-add operation
struct OpMultiplyAddGaussianComplex {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the inner product is defined by (XOR, POPC)
struct OpXorPopc {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag indicating the inner product is defined by (AND, POPC)
struct OpAndPopc {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag classifying math operators as thread-level operations.
struct OpClassSimt {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag classifying operators as Tensor Core operations.
struct OpClassTensorOp {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag classifying operators as WMMA Tensor Core operations
struct OpClassWmmaTensorOp {};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tag classifying operators as Tensor Core with structure sparse operations.
struct OpClassSparseTensorOp {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
/// Primary template; specializations (per architecture / instruction shape)
/// are provided in the per-SM headers included at the bottom of this file.
template <
  /// Size of the matrix product (concept: GemmShape)
  typename Shape_,
  /// Number of threads participating
  int kThreads_,
  /// Data type of A elements
  typename ElementA,
  /// Layout of A matrix (concept: MatrixLayout)
  typename LayoutA,
  /// Data type of B elements
  typename ElementB,
  /// Layout of B matrix (concept: MatrixLayout)
  typename LayoutB,
  /// Element type of C matrix
  typename ElementC,
  /// Layout of C matrix (concept: MatrixLayout)
  typename LayoutC,
  /// Inner product operator
  typename Operator
>
struct Mma;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - specialized for the scalar (1x1x1) product
/// computed by a single thread: d[0] = a[0] * b[0] + c[0].
template <
  /// Data type of A elements
  typename ElementA,
  /// Layout of A matrix (concept: MatrixLayout)
  typename LayoutA,
  /// Data type of B elements
  typename ElementB,
  /// Layout of B matrix (concept: MatrixLayout)
  typename LayoutB,
  /// Element type of C matrix
  typename ElementC_,
  /// Layout of C matrix (concept: MatrixLayout)
  typename LayoutC,
  /// Inner product operator
  typename Operator_
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, ElementA, LayoutA, ElementB, LayoutB, ElementC_, LayoutC, Operator_> {
  using Shape    = gemm::GemmShape<1, 1, 1>;
  using ElementC = ElementC_;
  using Operator = Operator_;

  /// Performs the single fused multiply-add.
  CUTLASS_HOST_DEVICE
  void operator()(
    Array<ElementC, 1>       &d,
    Array<ElementA, 1> const &a,
    Array<ElementB, 1> const &b,
    Array<ElementC, 1> const &c
  ) {
    multiply_add<ElementA, ElementB, ElementC> mad;
    d[0] = mad(a[0], b[0], c[0]);
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specifies internal data type for computation
/// (scoped enum-like wrapper describing the sparse metadata format).
struct SPFormatType {
  enum Kind {
    Thread  // Metadata distributed per-thread
  };
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
/// Primary template for structured-sparse MMA; specializations are provided
/// in the mma_sparse_sm*.h headers included at the bottom of this file.
template <
  /// Size of the matrix product (concept: GemmShape)
  typename Shape_,
  /// Number of threads participating
  int kThreads_,
  /// Data type of A elements
  typename ElementA,
  /// Layout of A matrix (concept: MatrixLayout)
  typename LayoutA,
  /// Data type of B elements
  typename ElementB,
  /// Layout of B matrix (concept: MatrixLayout)
  typename LayoutB,
  /// Element type of C matrix
  typename ElementC,
  /// Layout of C matrix (concept: MatrixLayout)
  typename LayoutC,
  /// Inner product operator
  typename Operator,
  /// Specifies meta data format
  SPFormatType::Kind SPFormat = SPFormatType::Thread
>
struct SparseMma;
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations for each compute capability
//
#include "cutlass/arch/mma_sm50.h"
#include "cutlass/arch/mma_sm60.h"
#include "cutlass/arch/mma_sm61.h"
#include "cutlass/arch/mma_sm70.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sparse_sm80.h"
#include "cutlass/arch/mma_sm89.h"
#include "cutlass/arch/mma_sparse_sm89.h"
#include "cutlass/arch/mma_sm90.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
namespace detail {
/// Helper for determining whether staged accumulation should be used for a given operator
/// (true for the fast-F32 TF32 paths and for SM89 staged-policy operators).
template <typename Operator>
struct UseStagedAccumulation {
  static bool const value = platform::is_same<typename Operator::MathOperator, OpMultiplyAddFastF32>::value ||
                            platform::is_same<typename Operator::MathOperator, OpMultiplyAddComplexFastF32>::value ||
                            is_sm89_staged_policy_v<Operator>;
};
} // namespace detail
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/arch/mma.h/0 | {
"file_path": "cutlass/include/cutlass/arch/mma.h",
"repo_id": "cutlass",
"token_count": 2356
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for half
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::half_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::half_t, ///< ElementB
LayoutB_, ///< LayoutB
ElementC_, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::half_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::half_t;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm70;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for f16 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// check supported wmma output data type for the given multiplicand data types
static_assert(
platform::is_same<cutlass::half_t, ElementC>::value || platform::is_same<float, ElementC>::value,
"Supported of wmma output data type for f16 multiplicands are: f16 and f32");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync for floating point multiplicands is avialable only for SM70 and beyond");
#endif
};
} // namespace arch
} // namespace cutlass
| cutlass/include/cutlass/arch/wmma_sm70.h/0 | {
"file_path": "cutlass/include/cutlass/arch/wmma_sm70.h",
"repo_id": "cutlass",
"token_count": 1912
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop
/// Primary template. Composes a threadblock-scoped implicit-GEMM mainloop with a
/// matching epilogue for the forward-propagation (Fprop) convolution. The partial
/// specializations below select iterator/mainloop types based on the iterator
/// algorithm, stage count, and operand layouts.
template <
  typename ElementA,                 // Element type of operand A (activations)
  typename LayoutA,                  // Global-memory layout of operand A
  typename ElementB,                 // Element type of operand B (filters)
  typename LayoutB,                  // Global-memory layout of operand B
  typename ElementC,                 // Element type of the output tensor
  typename LayoutC,                  // Layout of the output tensor
  typename ElementAccumulator,       // Accumulator element type
  typename OperatorClass,            // Operator class (e.g. arch::OpClassTensorOp)
  typename ArchTag,                  // Target architecture tag (e.g. arch::Sm80)
  typename ThreadblockShape,         // Threadblock-level GEMM tile shape
  typename WarpShape,                // Warp-level GEMM tile shape
  typename InstructionShape,         // Instruction-level MMA shape
  typename EpilogueOutputOp,         // Epilogue output functor
  typename ThreadblockSwizzle,       // Threadblock swizzling function
  int Stages,                        // Number of mainloop pipeline stages
  typename MathOperatorTag,          // Math operator tag (e.g. arch::OpMultiplyAdd)
  conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
  conv::StrideSupport StrideSupport = StrideSupport::kUnity,
  /// Access granularity of A matrix in units of elements
  int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
  /// Access granularity of B matrix in units of elements
  int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultConv2dFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM: shared-memory iterators, thread maps,
  // and the warp-level tensor-op MMA. Implicit GEMM maps A to row-major and B to
  // column-major canonical GEMM operands.
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      Stages, MathOperatorTag>;
  // Define iterators over tiles from the A operand (activations), using the
  // analytic (per-access pointer computation) addressing scheme.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  // Vectorized access: AlignmentA elements per memory transaction.
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA, LayoutA,
      ThreadMapA,
      AccessTypeA
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand (filters).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB, LayoutB,
      ThreadMapB,
      AccessTypeB
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Use streaming (cache-global) loads for B only when each access covers a full
  // 128-bit sector; otherwise cache at all levels.
  static cutlass::arch::CacheOperation::Kind const CacheOpB =
      ((sizeof_bits<ElementB>::value * AlignmentB) == 128)
          ? cutlass::arch::CacheOperation::Global
          : cutlass::arch::CacheOperation::Always;
  // Define the Mma (multistage, cp.async-style pipelined mainloop); the A operand
  // is always cached at all levels.
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    CacheOpB,
    MmaPolicy,
    Stages
  >;
  // Number of warp-level partitions along the K dimension, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;
  // Define the kernel: mainloop + epilogue + swizzle, tagged as Fprop.
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for the FixedChannels IteratorAlgorithm
/// and multistage pipeline.
/// FixedChannels iterator algorithm + multistage pipeline (OpClassTensorOp).
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kFixedChannels,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM (shared-memory layout, thread maps,
  // warp-level tensor-op MMA).
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      Stages, MathOperatorTag>;
  // Define iterators over tiles from the A operand using the FixedChannels
  // addressing scheme (channel count fixed at compile time via the access width).
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFixedChannels<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA, LayoutA,
      ThreadMapA,
      AccessTypeA
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFixedChannels<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB, LayoutB,
      ThreadMapB,
      AccessTypeB
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Stream B through L2 only (cache-global) when each access is a full 128-bit
  // sector; otherwise cache at all levels.
  static cutlass::arch::CacheOperation::Kind const CacheOpB =
      ((sizeof_bits<ElementB>::value * AlignmentB) == 128)
          ? cutlass::arch::CacheOperation::Global
          : cutlass::arch::CacheOperation::Always;
  // Define the Mma (multistage pipelined mainloop).
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    CacheOpB,
    MmaPolicy,
    Stages
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for the FixedChannels IteratorAlgorithm
/// and two stage pipeline.
/// FixedChannels iterator algorithm + two-stage (double-buffered) pipeline.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kFixedChannels,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM, configured for 2 pipeline stages.
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      2, MathOperatorTag>;
  // Define iterators over tiles from the A operand. The TileIterator adapter
  // wraps the access iterator for the two-stage (non-cp.async) mainloop.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFixedChannels<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA, LayoutA,
        ThreadMapA,
        AccessTypeA
      >
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFixedChannels<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB, LayoutB,
        ThreadMapB,
        AccessTypeB
      >
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Define the Mma (classic two-stage software-pipelined mainloop; takes no
  // cache-operation parameters).
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for the FewChannels IteratorAlgorithm
/// and multistage pipeline.
/// FewChannels iterator algorithm + multistage pipeline (OpClassTensorOp).
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kFewChannels,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM (shared-memory layout, thread maps,
  // warp-level tensor-op MMA).
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      Stages, MathOperatorTag>;
  // Define iterators over tiles from the A operand using the FewChannels
  // addressing scheme (small channel counts, narrower accesses).
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFewChannels<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA, LayoutA,
      ThreadMapA,
      AccessTypeA
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFewChannels<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB, LayoutB,
      ThreadMapB,
      AccessTypeB
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Stream B through L2 only (cache-global) when each access is a full 128-bit
  // sector; otherwise cache at all levels.
  static cutlass::arch::CacheOperation::Kind const CacheOpB =
      ((sizeof_bits<ElementB>::value * AlignmentB) == 128)
          ? cutlass::arch::CacheOperation::Global
          : cutlass::arch::CacheOperation::Always;
  // Define the Mma (multistage pipelined mainloop).
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    CacheOpB,
    MmaPolicy,
    Stages
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/// Defines a kernel for Conv2dFprop specialization for the FewChannels IteratorAlgorithm
/// and two stage pipeline.
/// FewChannels iterator algorithm + two-stage (double-buffered) pipeline.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kFewChannels,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM, configured for 2 pipeline stages.
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      2, MathOperatorTag>;
  // Define iterators over tiles from the A operand. The TileIterator adapter
  // wraps the access iterator for the two-stage (non-cp.async) mainloop.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFewChannels<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA, LayoutA,
        ThreadMapA,
        AccessTypeA
      >
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFewChannels<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB, LayoutB,
        ThreadMapB,
        AccessTypeB
      >
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // NOTE: no cache-operation constants are defined here. The two-stage
  // ImplicitGemmPipelined mainloop takes no cache-op parameters, so the
  // CacheOpB computation that appeared here previously was dead code (the
  // other two-stage specializations in this file do not define it either).
  // Define the Mma (classic two-stage software-pipelined mainloop).
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline with interleaved layout.
template <
  typename ElementA,
  typename ElementB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB,
  int InterleavedK
>
struct DefaultConv2dFprop <
  ElementA,
  layout::TensorNCxHWx<InterleavedK>,
  ElementB,
  layout::TensorCxRSKx<InterleavedK>,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM, using the interleaved GEMM core
  // (trailing 'true' selects the interleaved accumulator arrangement).
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
      ElementB, layout::RowMajorInterleaved<InterleavedK>,
      ElementAccumulator, LayoutC, arch::OpClassTensorOp,
      Stages, MathOperatorTag, true>;
  // Define iterators over tiles from the A operand
  // Note GEMM shared memory threadmap is used here because conv global memory
  // layout needs to be mapped to fprop which is similar to the crosswise
  // layout which is used by the interleaved GEMM shared memory threadmap.
  // The Interleaved GEMM global memory layout is similar to the congruous
  // layout.
  using ThreadMapA = typename MmaCore::SmemThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA, layout::TensorNCxHWx<InterleavedK>,
      ThreadMapA
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  // Note GEMM shared memory threadmap is used here because conv global memory
  // layout needs to be mapped to fprop which is similar to the crosswise
  // layout which is used by the interleaved GEMM shared memory threadmap.
  // The Interleaved GEMM global memory layout is similar to the congruous
  // layout.
  using ThreadMapB = typename MmaCore::SmemThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB, layout::TensorCxRSKx<InterleavedK>,
      ThreadMapB
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Define the Mma (multistage pipelined mainloop); B is streamed through L2
  // only (cache-global), A is cached at all levels.
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    arch::CacheOperation::Global,
    MmaPolicy,
    Stages
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue, specialized for interleaved output layouts.
  using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    InterleavedK
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm
/// and 2 stage pipeline.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM, configured for 2 pipeline stages.
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      2, MathOperatorTag>;
  // Define iterators over tiles from the A operand. The TileIterator adapter
  // wraps the analytic access iterator for the two-stage mainloop.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA, LayoutA,
        ThreadMapA,
        AccessTypeA
      >
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB, LayoutB,
        ThreadMapB,
        AccessTypeB
      >
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Define the Mma (classic two-stage software-pipelined mainloop).
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue via the arch-dispatched conv epilogue helper.
  using Epilogue = typename detail::DefaultConvEpilogue<
    ArchTag,
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and 2 stage
/// pipeline with interleaved layout.
template <
  typename ElementA,
  typename ElementB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB,
  int InterleavedK
>
struct DefaultConv2dFprop <
  ElementA,
  layout::TensorNCxHWx<InterleavedK>,
  ElementB,
  layout::TensorCxRSKx<InterleavedK>,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM using the interleaved GEMM core,
  // configured for 2 pipeline stages.
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
      ElementB, layout::RowMajorInterleaved<InterleavedK>,
      ElementAccumulator, LayoutC, arch::OpClassTensorOp,
      2, MathOperatorTag, true>;
  // Define iterators over tiles from the A operand
  // Note GEMM shared memory threadmap is used here because conv global memory
  // layout needs to be mapped to fprop which is similar to the crosswise
  // layout which is used by the interleaved GEMM shared memory threadmap.
  // The Interleaved GEMM global memory layout is similar to the congruous
  // layout.
  using ThreadMapA = typename MmaCore::SmemThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA, layout::TensorNCxHWx<InterleavedK>,
        ThreadMapA
      >
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  // Note GEMM shared memory threadmap is used here because conv global memory
  // layout needs to be mapped to fprop which is similar to the crosswise
  // layout which is used by the interleaved GEMM shared memory threadmap.
  // The Interleaved GEMM global memory layout is similar to the congruous
  // layout.
  using ThreadMapB = typename MmaCore::SmemThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB, layout::TensorCxRSKx<InterleavedK>,
        ThreadMapB
      >
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Define the Mma (classic two-stage software-pipelined mainloop).
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue, specialized for interleaved output layouts.
  using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    InterleavedK
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for the Optimized IteratorAlgorithm
/// and multistage pipeline.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM (shared-memory layout, thread maps,
  // warp-level tensor-op MMA).
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
      ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
      Stages, MathOperatorTag
  >;
  // Define iterators over tiles from the A operand using the optimized
  // (precomputed-delta) addressing scheme.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA,
      LayoutA,
      ThreadMapA,
      AccessTypeA
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB,
      LayoutB,
      ThreadMapB,
      AccessTypeB
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Stream B through L2 only (cache-global) when each access is a full 128-bit
  // sector; otherwise cache at all levels.
  static cutlass::arch::CacheOperation::Kind const CacheOpB =
      ((sizeof_bits<ElementB>::value * AlignmentB) == 128)
          ? cutlass::arch::CacheOperation::Global
          : cutlass::arch::CacheOperation::Always;
  // Define the Mma (multistage pipelined mainloop).
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    CacheOpB,
    MmaPolicy,
    Stages
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue
  // NOTE(review): the trailing arguments appear to select no scatter-D, no
  // output permutation, the given stride support, and a rank-4 output tensor —
  // confirm against DefaultEpilogueTensorOp's parameter list.
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    false,
    layout::NoPermute,
    StrideSupport,
    4
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for the Optimized IteratorAlgorithm
/// and multistage pipeline with interleaved layout.
template <
  typename ElementA,
  typename ElementB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB,
  int InterleavedK
>
struct DefaultConv2dFprop <
  ElementA,
  layout::TensorNCxHWx<InterleavedK>,
  ElementB,
  layout::TensorCxRSKx<InterleavedK>,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM using the interleaved GEMM core
  // (trailing 'true' selects the interleaved accumulator arrangement).
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
      ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp,
      Stages, MathOperatorTag, true
  >;
  // Define iterators over tiles from the A operand; the GEMM shared-memory
  // thread map is reused for the interleaved global-memory layout.
  using ThreadMapA = typename MmaCore::SmemThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA,
      layout::TensorNCxHWx<InterleavedK>,
      ThreadMapA
    >;
  using SmemIteratorA = typename MmaCore::SmemIteratorA;
  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::SmemThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB,
      layout::TensorCxRSKx<InterleavedK>,
      ThreadMapB
    >;
  using SmemIteratorB = typename MmaCore::SmemIteratorB;
  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;
  // Define the Mma (multistage pipelined mainloop); B is streamed through L2
  // only (cache-global), A is cached at all levels.
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    arch::CacheOperation::Global,
    MmaPolicy,
    Stages
  >;
  // Warp-level K partitions, forwarded to the epilogue.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
  // Define the epilogue, specialized for interleaved output layouts.
  using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    InterleavedK
  >::Epilogue;
  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm
/// and 2 stage pipeline (Tensor Core math, generic layouts).
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {

  // Define the core components from GEMM (shared-memory iterators, thread maps,
  // warp-level MMA), instantiated for a two-stage pipeline.
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
    ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
    2, MathOperatorTag>;

  // Define iterators over tiles from the A operand (activations); AccessType fixes the
  // vector width of each global-memory access to AlignmentA elements.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA,
        LayoutA,
        ThreadMapA,
        AccessTypeA
      >
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (filters)
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB,
        LayoutB,
        ThreadMapB,
        AccessTypeB
      >
    >;
  
  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma: two-stage pipelined threadblock-scoped mainloop
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;

  // Number of warp-level partitions of the threadblock's K dimension.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  // Define the epilogue (architecture-dispatched via detail::DefaultConvEpilogue)
  using Epilogue = typename detail::DefaultConvEpilogue<
    ArchTag,
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp
  >::Epilogue;

  // Define the kernel: implicit-GEMM convolution, forward propagation
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and 2 stage
/// pipeline with interleaved layout (TensorNCxHWx activations, TensorCxRSKx filters).
template <
  typename ElementA,
  typename ElementB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB,
  int InterleavedK
>
struct DefaultConv2dFprop <
  ElementA,
  layout::TensorNCxHWx<InterleavedK>,
  ElementB,
  layout::TensorCxRSKx<InterleavedK>,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassTensorOp,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {

  // Define the core components from GEMM; interleaved conv layouts map onto interleaved
  // GEMM layouts, and the trailing 'true' selects the interleaved shared-memory
  // configuration of DefaultMmaCore.
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
    ElementB, layout::RowMajorInterleaved<InterleavedK>,
    ElementAccumulator, LayoutC, arch::OpClassTensorOp,
    2, MathOperatorTag, true>;

  // Define iterators over tiles from the A operand (activations)
  using ThreadMapA = typename MmaCore::SmemThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA, layout::TensorNCxHWx<InterleavedK>,
        ThreadMapA
      >
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (filters)
  using ThreadMapB = typename MmaCore::SmemThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB, layout::TensorCxRSKx<InterleavedK>,
        ThreadMapB
      >
    >;
  
  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma: two-stage pipelined threadblock-scoped mainloop
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;

  // Number of warp-level partitions of the threadblock's K dimension.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  // Define the epilogue (interleaved output layout)
  using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    InterleavedK
  >::Epilogue;

  // Define the kernel: implicit-GEMM convolution, forward propagation
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////
//                            OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {

  // Define the core components from GEMM (SIMT math class)
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
    ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
    Stages, MathOperatorTag>;

  // Define iterators over tiles from the A operand (activations); the Analytic
  // iterator computes tensor coordinates directly from the conv problem size.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA, LayoutA,
      ThreadMapA
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (filters)
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB, LayoutB,
      ThreadMapB
    >;
  
  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma: multistage (software-pipelined) threadblock-scoped mainloop;
  // both operands use CacheOperation::Always.
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    arch::CacheOperation::Always,
    MmaPolicy,
    Stages
  >;

  // Define the epilogue (see DefaultEpilogueSimt for the trailing parameter semantics)
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    WarpMmaSimtOp,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    false,
    layout::NoPermute,
    StrideSupport,
    4
  >::Epilogue;

  // Define the kernel: implicit-GEMM convolution, forward propagation
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {

  // Define the core components from GEMM (SIMT math class)
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
    ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
    Stages, MathOperatorTag>;

  // Define iterators over tiles from the A operand (activations)
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA,
      LayoutA,
      ThreadMapA
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (filters)
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB,
      LayoutB,
      ThreadMapB
    >;
  
  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma: multistage (software-pipelined) threadblock-scoped mainloop;
  // both operands use CacheOperation::Always.
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    arch::CacheOperation::Always,
    MmaPolicy,
    Stages
  >;

  // Define the epilogue (see DefaultEpilogueSimt for the trailing parameter semantics)
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    WarpMmaSimtOp,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    false,
    layout::NoPermute,
    StrideSupport,
    4
  >::Epilogue;

  // Define the kernel: implicit-GEMM convolution, forward propagation
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {

  // Define the core components from GEMM (SIMT math class, two-stage pipeline)
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
    ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
    2, MathOperatorTag>;

  // Define iterators over tiles from the A operand (activations); the Analytic
  // iterator computes tensor coordinates directly from the conv problem size.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA, LayoutA,
        ThreadMapA
      >
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (filters)
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB, LayoutB,
        ThreadMapB
      >
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma: two-stage pipelined threadblock-scoped mainloop
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;

  // Define the epilogue (see DefaultEpilogueSimt for the trailing parameter semantics)
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    WarpMmaSimtOp,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    false,
    layout::NoPermute,
    StrideSupport,
    4
  >::Epilogue;

  // Define the kernel: implicit-GEMM convolution, forward propagation
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dFprop <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {

  // Define the core components from GEMM (SIMT math class, two-stage pipeline)
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
    ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
    2, MathOperatorTag>;

  // Define iterators over tiles from the A operand (activations)
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA,
        LayoutA,
        ThreadMapA
      >
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (filters)
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB,
        LayoutB,
        ThreadMapB
      >
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma: two-stage pipelined threadblock-scoped mainloop
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;

  // Define the epilogue (see DefaultEpilogueSimt for the trailing parameter semantics)
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    WarpMmaSimtOp,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount,
    false,
    layout::NoPermute,
    StrideSupport,
    4
  >::Epilogue;

  // Define the kernel: implicit-GEMM convolution, forward propagation
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kFprop
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/conv/kernel/default_conv2d_fprop.h/0 | {
"file_path": "cutlass/include/cutlass/conv/kernel/default_conv2d_fprop.h",
"repo_id": "cutlass",
"token_count": 19809
} | 24 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief
      Defines Deconv3d kernels with an epilogue supporting elementwise broadcast,
      built by rewrapping an existing DefaultDeconv3d kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_deconv3d.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_with_fused_epilogue.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Primary template: composes a DefaultDeconv3d kernel with a broadcast-capable
/// epilogue (Tensor Core path). The mainloop (Mma) is reused from the base kernel;
/// only the epilogue and the kernel wrapper are replaced.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename OperatorClass,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
  conv::StrideSupport StrideSupport = StrideSupport::kStrided,
  /// Access granularity of A matrix in units of elements
  int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
  /// Access granularity of B matrix in units of elements
  int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
>
struct DefaultDeconv3dWithBroadcast {

  // Base Deconv3d kernel providing the threadblock-scoped mainloop (Mma) and
  // the tile shapes / warp operator consumed by the replacement epilogue.
  using ImplicitGemmBase = typename DefaultDeconv3d<
    ElementA, LayoutA,
    ElementB, LayoutB,
    ElementC, LayoutC,
    ElementAccumulator,
    OperatorClass,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    Stages,
    MathOperatorTag,
    IteratorAlgorithm,
    StrideSupport
  >::Kernel;

  // Define epilogue: broadcast-capable Tensor Core epilogue; ElementT and
  // ElementVector are taken from the fused output operator's interface.
  using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastTensorOp<
    ArchTag,
    typename ImplicitGemmBase::Epilogue::Shape,
    typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
    ImplicitGemmBase::Epilogue::kPartitionsK,
    ElementC,
    typename EpilogueOutputOp::ElementT,
    typename EpilogueOutputOp::ElementVector,
    EpilogueOutputOp,
    ImplicitGemmBase::Epilogue::kElementsPerAccess
  >::Epilogue;

  // Define the kernel: base mainloop + fused (broadcast) epilogue, deconvolution operator
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
    typename ImplicitGemmBase::Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kDeconv,
    Conv3dProblemSize
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////
//                            OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Deconv3d with broadcast, specialized for the SIMT
/// (FFMA-based) math class with unit-stride support. Generic over
/// IteratorAlgorithm and pipeline stage count.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::IteratorAlgorithm IteratorAlgorithm,
  int AlignmentA,
  int AlignmentB
>
struct DefaultDeconv3dWithBroadcast <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm,
  conv::StrideSupport::kUnity,
  AlignmentA,
  AlignmentB
> {

  // Base Deconv3d kernel providing the threadblock-scoped mainloop (Mma).
  using ImplicitGemmBase = typename DefaultDeconv3d<
    ElementA, LayoutA,
    ElementB, LayoutB,
    ElementC, LayoutC,
    ElementAccumulator,
    arch::OpClassSimt,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    Stages,
    MathOperatorTag,
    IteratorAlgorithm,
    conv::StrideSupport::kUnity
  >::Kernel;

  // Define epilogue: broadcast-capable SIMT epilogue
  using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastSimt<
    ArchTag,
    typename ImplicitGemmBase::Epilogue::Shape,
    typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
    ElementC,
    typename EpilogueOutputOp::ElementT,
    typename EpilogueOutputOp::ElementVector,
    EpilogueOutputOp,
    ImplicitGemmBase::Epilogue::kElementsPerAccess
  >::Epilogue;

  // Define the kernel: base mainloop + fused (broadcast) epilogue, deconvolution operator
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
    typename ImplicitGemmBase::Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kDeconv,
    Conv3dProblemSize
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a kernel for Deconv3d with broadcast, specialized for the SIMT
/// (FFMA-based) math class with strided support; uses the strided-dgrad
/// variant of the broadcast epilogue.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::IteratorAlgorithm IteratorAlgorithm,
  int AlignmentA,
  int AlignmentB
>
struct DefaultDeconv3dWithBroadcast <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm,
  conv::StrideSupport::kStrided,
  AlignmentA,
  AlignmentB
> {

  // Base Deconv3d kernel providing the threadblock-scoped mainloop (Mma).
  using ImplicitGemmBase = typename DefaultDeconv3d<
    ElementA, LayoutA,
    ElementB, LayoutB,
    ElementC, LayoutC,
    ElementAccumulator,
    arch::OpClassSimt,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    Stages,
    MathOperatorTag,
    IteratorAlgorithm,
    conv::StrideSupport::kStrided
  >::Kernel;

  // Define epilogue: broadcast-capable SIMT epilogue for strided dgrad/deconv output
  using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastSimtStridedDgrad<
    ArchTag,
    typename ImplicitGemmBase::Epilogue::Shape,
    typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
    ElementC,
    typename EpilogueOutputOp::ElementT,
    typename EpilogueOutputOp::ElementVector,
    EpilogueOutputOp,
    ImplicitGemmBase::Epilogue::kElementsPerAccess
  >::Epilogue;

  // Define the kernel: base mainloop + fused (broadcast) epilogue, deconvolution operator
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
    typename ImplicitGemmBase::Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kDeconv,
    Conv3dProblemSize
  >;

};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/conv/kernel/default_deconv3d_with_broadcast.h/0 | {
"file_path": "cutlass/include/cutlass/conv/kernel/default_deconv3d_with_broadcast.h",
"repo_id": "cutlass",
"token_count": 2862
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dFpropActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dAnalyticParams<Layout>;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_z_[ThreadMap::Iterations::kStrided];
int offset_p_[ThreadMap::Iterations::kStrided];
int offset_q_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_nzpq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
offset_n_[s] = offset_nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
int residual = offset_nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
offset_z_[s] = residual / (problem_size_.P * problem_size_.Q);
residual = residual % (problem_size_.P * problem_size_.Q);
offset_p_[s] = residual / problem_size_.Q;
offset_q_[s] = residual % problem_size_.Q;
}
set_iteration_index(0);
}
/// Builds the layout-dependent Params object from the problem description.
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
  // Split the linear index into its strided and contiguous components.
  iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
  iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
  // Convert the element count to bytes before bumping the byte pointer.
  LongIndex byte_offset = pointer_offset * sizeof_bits<Element>::value / 8;
  pointer_ += byte_offset;
}
/// Moves to the next tile by advancing the filter position in s-fastest order
/// (s, then r, then t); once the whole filter is consumed, steps the channel
/// offset forward by one threadblock tile per split-k slice.
CUTLASS_HOST_DEVICE
void advance() {
  ++filter_s_;
  if (filter_s_ >= problem_size_.S) {
    filter_s_ = 0;
    ++filter_r_;
    if (filter_r_ >= problem_size_.R) {
      filter_r_ = 0;
      ++filter_t_;
      if (filter_t_ >= problem_size_.T) {
        filter_t_ = 0;
        filter_c_ += Shape::kColumn * problem_size_.split_k_slices;
      }
    }
  }
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
  int n = offset_n_[iteration_strided_];
  int z = offset_z_[iteration_strided_];
  int p = offset_p_[iteration_strided_];
  int q = offset_q_[iteration_strided_];

  // Convolution mode mirrors the filter; cross-correlation uses it as-is.
  bool mirror = (problem_size_.mode == Mode::kConvolution);
  int t = mirror ? (problem_size_.T - 1 - filter_t_) : filter_t_;
  int r = mirror ? (problem_size_.R - 1 - filter_r_) : filter_r_;
  int s = mirror ? (problem_size_.S - 1 - filter_s_) : filter_s_;

  // Map output coords through stride/pad/dilation to input coords.
  int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d;
  int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
  int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;

  return TensorCoord(n, d, h, w, filter_c_);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
  TensorCoord coord = at();
  bool batch_ok   = coord.n() < problem_size_.N;
  bool depth_ok   = coord.d() >= 0 && coord.d() < problem_size_.D;
  bool height_ok  = coord.h() >= 0 && coord.h() < problem_size_.H;
  bool width_ok   = coord.w() >= 0 && coord.w() < problem_size_.W;
  bool channel_ok = coord.c() < problem_size_.C;
  return batch_ok && depth_ok && height_ok && width_ok && channel_ok;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
  // Map the logical coordinate through the layout, then scale to bytes.
  LongIndex byte_offset = params_.layout(at()) * sizeof_bits<Element>::value / 8;
  return reinterpret_cast<AccessType const *>(pointer_ + byte_offset);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorAnalytic &operator++() {
  // Contiguous index advances fastest; on wrap, advance the strided index.
  if (++iteration_contiguous_ >= ThreadMap::Iterations::kContiguous) {
    iteration_contiguous_ = 0;
    if (++iteration_strided_ >= ThreadMap::Iterations::kStrided) {
      iteration_strided_ = 0;
    }
  }
  return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
  // The channel count must be a multiple of the 128-bit access width.
  int const channels_per_access = 128 / sizeof_bits<Element>::value;
  return (problem_size.C % channels_per_access == 0)
             ? Status::kSuccess
             : Status::kErrorInvalidProblem;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h/0 | {
"file_path": "cutlass/include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h",
"repo_id": "cutlass",
"token_count": 3221
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting depthwise-related SIMT instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/warp/mma_depthwise_simt.h"
#include "cutlass/gemm/threadblock/mma_pipelined.h"
#include "cutlass/gemm/threadblock/mma_singlestage.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "cutlass/conv/threadblock/depthwise_mma_base.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h"
#include "cutlass/arch/cache_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
namespace detail {
//
// Converts a WarpShapeM (the whole 1-D tile of elements covered by the warp)
// into the 2-D number of elements held by each partition within the warp.
// The goal is for each thread's tile of elements to be as square as
// possible for performance (4x4 will be faster than 2x8).
/// Decomposes the warp-level 1-D tile extent WarpShapeM into a per-thread 2-D
/// output tile (kP x kQ), subject to kP * kQ * WarpNumThreadsM == WarpShapeM.
/// Each specialization below encodes that invariant explicitly via a
/// static_assert so an inconsistent edit fails at compile time.
template<int WarpShapeM, // The number of elements (1D) contained in the entire warp
int WarpNumThreadsM> // The number of partitions within the warp
struct SimtWarpShape {
  // kP * kQ * WarpNumThreadsM = WarpShapeM
  // If needed, enable more specializations.
};

template <>
struct SimtWarpShape<4, 4> {
  static constexpr int kP = 1;
  static constexpr int kQ = 1;
  static_assert(kP * kQ * 4 == 4, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<4, 2> {
  static constexpr int kP = 2;
  static constexpr int kQ = 1;
  static_assert(kP * kQ * 2 == 4, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<4, 1> {
  static constexpr int kP = 2;
  static constexpr int kQ = 2;
  static_assert(kP * kQ * 1 == 4, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<8, 1> {
  static constexpr int kP = 2;
  static constexpr int kQ = 4;
  static_assert(kP * kQ * 1 == 8, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<8, 2> {
  static constexpr int kP = 2;
  static constexpr int kQ = 2;
  static_assert(kP * kQ * 2 == 8, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<8, 4> {
  static constexpr int kP = 1;
  static constexpr int kQ = 2;
  static_assert(kP * kQ * 4 == 8, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<16, 1> {
  static constexpr int kP = 4;
  static constexpr int kQ = 4;
  static_assert(kP * kQ * 1 == 16, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<16, 2> {
  static constexpr int kP = 2;
  static constexpr int kQ = 4;
  static_assert(kP * kQ * 2 == 16, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<16, 4> {
  static constexpr int kP = 2;
  static constexpr int kQ = 2;
  static_assert(kP * kQ * 4 == 16, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <int WarpNumThreadsM>
struct SimtWarpShape<25, WarpNumThreadsM> {
  // 25 = 5 * 5 only divides evenly across a single partition.
  static_assert(WarpNumThreadsM == 1, "WarpShapeM could not be evenly split by threads");
  static constexpr int kP = 5;
  static constexpr int kQ = 5;
  static_assert(kP * kQ * WarpNumThreadsM == 25, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<32, 1> {
  static constexpr int kP = 4;
  static constexpr int kQ = 8;
  static_assert(kP * kQ * 1 == 32, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<32, 2> {
  static constexpr int kP = 4;
  static constexpr int kQ = 4;
  static_assert(kP * kQ * 2 == 32, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};

template <>
struct SimtWarpShape<32, 4> {
  static constexpr int kP = 2;
  static constexpr int kQ = 4;
  static_assert(kP * kQ * 4 == 32, "kP * kQ * WarpNumThreadsM must equal WarpShapeM");
};
} // namespace detail
/// Threadblock-scoped MMA core for depthwise convolution, extended with an
/// explicit per-thread (lane) access width for each operand
/// (kLaneAccessSizeA_/kLaneAccessSizeB_, in bits). Declaration only:
/// behavior is supplied by the partial specializations below.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_ = 0,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeB_ = 0,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA (saturating multiply-add for narrow integer
/// tensor-op operands, plain multiply-add otherwise)
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DepthwiseMmaCoreWithLaneAccessSize;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock-scoped MMA core for direct (non-im2col) depthwise convolution.
/// Adds per-thread lane access widths, the threadblock output/filter shapes,
/// and iterator-algorithm parameters (stride, dilation, activation shape).
/// Declaration only: behavior is supplied by the partial specializations below.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of threadblock-scoped output tile
typename ThreadBlockOutputShape,
/// Shape of filter shape per threadblock
typename FilterShape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_ = 0,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeB_ = 0,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA (saturating multiply-add for narrow integer
/// tensor-op operands, plain multiply-add otherwise)
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> ); -1 means determined at runtime
typename StrideShape = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> ); -1 means determined at runtime
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock; -1 means determined at runtime
typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for kLaneAccessSizeA/B == -1: no explicit lane
/// access width was requested, so inherit the behavior of the default GEMM
/// threadblock core unchanged.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA,
/// per-element transformation for elements of B
ComplexTransform TransformB,
bool IsComplex
>
struct DepthwiseMmaCoreWithLaneAccessSize<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, -1, -1, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> : cutlass::gemm::threadblock::DefaultMmaCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
/// Partial specialization: SIMT depthwise MMA core for row-major A x
/// column-major B with single-element (1x1x1) instructions and two stages.
/// Extends the default MMA core, but derives the per-lane MMA shape from the
/// requested per-thread access widths kLaneAccessSizeA_/kLaneAccessSizeB_.
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access (a value of -1 indicates the default)
int kLaneAccessSizeA_,
/// Size of a warp-scoped per thread access (a value of -1 indicates the default)
int kLaneAccessSizeB_,
/// Operation performed by GEMM
typename Operator_>
struct DepthwiseMmaCoreWithLaneAccessSize<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
kLaneAccessSizeB_,
2,
Operator_> : public cutlass::gemm::threadblock::DefaultMmaCore<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
2,
Operator_> {
// The default core supplies the shared-memory write iterators and warp counts.
using Base = cutlass::gemm::threadblock::DefaultMmaCore<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
2,
Operator_>;
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
// Per-thread access widths in bits for each operand.
static int const kLaneAccessSizeA = kLaneAccessSizeA_;
static int const kLaneAccessSizeB = kLaneAccessSizeB_;
// Divisibility requirements
static_assert( kLaneAccessSizeA > 0 && kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger then ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = typename Base::WarpCount;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts (operands are transposed relative to global memory)
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory are same as base class
//
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = cutlass::gemm::threadblock::detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
// Number of elements one lane access covers for each operand.
static const int numElementsA = kLaneAccessSizeA / sizeof_bits<ElementA>::value;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
// Per-lane MMA extents, clamped above by the per-thread tile extents.
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
// Skews applied to the shared-memory tiles (computed for the transposed store).
static int const kPaddingM = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// Per-lane MMA shape; each extent is bounded by the thread tile above.
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::gemm::threadblock::MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
/// Partial specialization: direct-conv depthwise SIMT core with a fixed
/// 128-bit lane access width for operand B. The activation (A) extents are
/// runtime values here, so the A shared-memory iterator uses dynamic
/// iterations with a 1-row threadmap.
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of threadblock-scoped output tile (concept: TensorNHWCShape)
typename ThreadBlockOutputShape_,
/// Shape of filter shape per threadblock
typename FilterShape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_,
/// Number of stages
int Stages_,
/// Operation performed by GEMM
typename Operator_>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_,
ThreadBlockOutputShape_,
FilterShape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
128,
Stages_,
Operator_> {
using Shape = Shape_;
using FilterShape = FilterShape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
// Lane access width for B, fixed at 128 bits by this specialization.
static int const kLaneAccessSizeB = 128;
// Divisibility requirements
static_assert( kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger then ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = cutlass::gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
1
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
// For Gmem load: 128-bit vectorized accesses for both operands.
static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value;
static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, 1>, // Set kStrided = 1 because activation shape is runtime value.
kThreads,
kElementsPerAccessA
>;
/// SMEM-write ThreadMap for A (same map as the gmem load)
using SmemThreadMapA = IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<1, Shape::kN>, // set kRow is 1 because it is a runtime value
ElementA,
SmemLayoutA,
0,
SmemThreadMapA, // was IteratorThreadMapA
true // Dynamic iterations.
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, FilterShape::kCount>,
kThreads,
kElementsPerAccessB
>;
/// SMEM-write ThreadMap for B (same map as the gmem load; no transpose applied)
using SmemThreadMapB = IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<FilterShape::kCount, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB, // was IteratorThreadMapB
false // static iterations.
>;
//
// Warp-level matrix multiply operator
//
// Groups per thread: 2 for multi-byte element types (e.g. fp32/fp16),
// 4 for 1-byte element types.
static const int GroupsPerThread = sizeof(ElementB) > 1 ? 2 : 4;
// Define the warp-level op
static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize);
static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
// Get output P, Q per thread
static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP;
static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ;
static const int LaneLayout = 1;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN);
// Define the output tile computed by each thread
using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>;
// Fetch the channel with same access size
static const int LaneM = LaneN;
// No paddings
static int const kPaddingM = 0;
static int const kPaddingN = 0;
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// Per-lane MMA shape; each extent is bounded by the thread tile above.
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
FilterShape, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<>
ThreadBlockOutputShape_, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
IteratorThreadMapA,
IteratorThreadMapB,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of threadblock-scoped output tile (concept: TensorNHWCShape)
typename ThreadBlockOutputShape_,
/// Shape of filter shape per threadblock
typename FilterShape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_,
/// Number of stages
int Stages_,
/// Operation performed by GEMM
typename Operator_,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_,
/// Activation Shape loaded by threadblock
typename ActivationShape_>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_,
ThreadBlockOutputShape_,
FilterShape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
128,
Stages_,
Operator_,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape_,
DilationShape_,
ActivationShape_> {
using Shape = Shape_;
using FilterShape = FilterShape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
using StrideShape = StrideShape_;
using DilationShape = DilationShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using ActivationShape = ActivationShape_;
static int const kLaneAccessSizeB = 128;
// Divisility requirements
static_assert( kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger then ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = cutlass::gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
1
>;
// Divisility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
// For Gmem load
static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value;
static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<ActivationShape::kC, ActivationShape::kNHW>,
kThreads,
kElementsPerAccessA
>;
/// ThreadMap of iterator A
using SmemThreadMapA = IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<ActivationShape::kNHW, ActivationShape::kC>,
ElementA,
SmemLayoutA,
0,
SmemThreadMapA, // was IteratorThreadMapA
false // static iterations.
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, FilterShape::kCount>,
kThreads,
kElementsPerAccessB
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<FilterShape::kCount, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB, // was IteratorThreadMapB
false // static iterations.
>;
//
// Warp-level matrix multiply operator
//
// Groups per threads
// Fp32: 2 groups
// Fp16: 2 groups
static const int GroupsPerThread = sizeof(ElementB) > 1 ? 2 : 4;
// Define the warp-level op
static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize);
static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN;
static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP;
static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = 1;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN);
// Define the output tile computed by each thread
using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>;
// Fetch the channel with same access size
static const int LaneM = LaneN;
// No paddings
static int const kPaddingM = 0;
static int const kPaddingN = 0;
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// these should have max of thread tile also
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
FilterShape, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<>
ThreadBlockOutputShape, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
IteratorAlgorithm::kFixedStrideDilation, /// Iterator algo type
StrideShape, /// Stride ( MatrixShape<Height, Width> )
DilationShape, /// Dilation ( MatrixShape<Height, Width> )
ActivationShape /// Activation Shape loaded by threadblock
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
IteratorThreadMapA,
IteratorThreadMapB,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
| cutlass/include/cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h/0 | {
"file_path": "cutlass/include/cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h",
"repo_id": "cutlass",
"token_count": 14729
} | 27 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/epilogue/fusion/operations.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Dispatch interface for epilogue fusion callbacks
// For visitor fusions, this is just a convenience wrapper to provide metadata and non-nested args.
// It is also valid to just pass visitor callbacks directly to the collective, e.g. fusion::Sm90LinearCombination,
// provided the collective supports a visitor callbacks interface. This is useful for implementing custom fusions.
/// Primary template: selected only when no specialization matches the given
/// (DispatchPolicy, Operation) pair. dependent_false defers evaluation until
/// instantiation so the static_assert fires with a readable error message
/// instead of failing eagerly at template definition time.
template <
  class DispatchPolicy, // specialize on collective's dispatch policy since callbacks API will depend on collective's algorithm
  class Operation, // the fusion operation being performed, e.g. fusion::LinearCombination
  class CtaTile_MNK, // computed tile per CTA
  class EpilogueTile_MN, // epilogue subtile size
  class... Args // callbacks implementation dependent args (e.g. copy atoms, smem layouts)
>
struct FusionCallbacks {
  // Fires only when this unspecialized form is instantiated, i.e. when no
  // callbacks specialization exists for the requested policy/operation.
  static_assert(cutlass::detail::dependent_false<DispatchPolicy, Operation>, "Could not find a callbacks specialization.");
};
// Metadata helper to handle custom EVTs or other non-FusionCallbacks types.
// Fallback case: any type that is not a FusionCallbacks instantiation is
// treated as the fusion operation itself, and every other piece of metadata
// is erased to void.
template <class CustomCallbacks>
struct FusionCallbacksTraits {
  using Operation = CustomCallbacks;
  using DispatchPolicy = void;
  using CtaTile_MNK = void;
  using EpilogueTile_MN = void;
  using ElementCompute = void;
};
// Specialization that unwraps a FusionCallbacks instantiation and re-exposes
// its template arguments as member typedefs, plus the operation's compute type.
template <
  class Policy,
  class Op,
  class TileShape_MNK,
  class SubtileShape_MN,
  class... RestArgs
>
struct FusionCallbacksTraits<
  FusionCallbacks<Policy, Op, TileShape_MNK, SubtileShape_MN, RestArgs...>
> {
  using DispatchPolicy = Policy;
  using Operation = Op;
  using CtaTile_MNK = TileShape_MNK;
  using EpilogueTile_MN = SubtileShape_MN;
  using ElementCompute = typename Operation::ElementCompute;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/epilogue/fusion/callbacks.hpp/0 | {
"file_path": "cutlass/include/cutlass/epilogue/fusion/callbacks.hpp",
"repo_id": "cutlass",
"token_count": 1049
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped complex GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_gaussian_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization and defines sensible defaults for epilogues for complex*complex case
// 4 real-valued mma operations (Complex)
// A = (ar + j ai), B (br +j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues of complex-valued TensorOp GEMMs
/// using the 4-multiplier complex multiply-add (arch::OpMultiplyAddComplex).
/// All members are typedefs; this struct only assembles the epilogue type.
template <
  /// Epilogue Shape
  typename Shape_,
  /// Warp-level mma operator
  typename WarpMmaTensorOp_,
  /// Number of k partitions
  int PartitionsK,
  /// Epilogue output operator
  typename OutputOp_,
  /// Elements accessed by inner-most loop of AccumulatorFragmentIterator::load()
  int ElementsPerAccess,
  /// Multiply-add operator
  /// Selects between (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
  typename Operator_ = arch::OpMultiplyAddComplex
>
struct DefaultEpilogueComplexTensorOp {
  using Shape = Shape_;
  using WarpMmaTensorOp = WarpMmaTensorOp_;
  static int const kPartitionsK = PartitionsK;
  using OutputOp = OutputOp_;
  static int const kElementsPerAccess = ElementsPerAccess;
  using Operator = Operator_;

  // Derived element/layout types taken from the output op and the warp-level MMA
  using ElementOutput = typename OutputOp::ElementOutput;
  using LayoutC = typename WarpMmaTensorOp::LayoutC;
  using ElementAccumulator = typename WarpMmaTensorOp::ElementC;

  //
  // Thread map
  //

  /// Maps threads of the threadblock onto the output tile
  using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
    Shape,
    typename WarpMmaTensorOp::Shape,
    kPartitionsK,
    ElementOutput,
    kElementsPerAccess
  >::Type;

  /// Predicated global-memory iterator that writes the final output tile
  using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
    OutputTileThreadMap,
    ElementOutput
  >;

  /// Iterates over the warp-level complex accumulator fragments
  using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
    typename WarpMmaTensorOp::Shape,
    typename WarpMmaTensorOp::Policy::Operator::Shape,
    typename WarpMmaTensorOp::Policy::Operator::ElementC,
    typename WarpMmaTensorOp::Policy::Operator::FragmentC,
    LayoutC
  >;

  /// Warp-level iterator used to stage accumulators through shared memory
  using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
    typename WarpMmaTensorOp::Shape,
    typename WarpMmaTensorOp::Policy::Operator::Shape,
    ElementAccumulator,
    LayoutC
  >;

  /// Reads staged accumulators back from shared memory in output order
  using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
    typename OutputTileThreadMap::CompactedThreadMap,
    ElementAccumulator
  >;

  /// Hard-coded padding elements added
  using Padding = cutlass::MatrixShape<0, 0>;

  //
  // Define the epilogue
  //
  using Epilogue = cutlass::epilogue::threadblock::Epilogue<
    Shape,
    WarpMmaTensorOp,
    kPartitionsK,
    OutputTileIterator,
    AccumulatorFragmentIterator,
    WarpTileIterator,
    SharedLoadIterator,
    OutputOp,
    Padding
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization and defines sensible defaults for epilogues for complex*complex case
// 3 real-valued mma operations (Gaussian Complex)
// A = (ar + j ai), B = (br +j bi), D = AB
// P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi)
// D = dr + j di = (P1 - P3) + j (P1 + P2)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Gaussian complex multiply-add
/// (arch::OpMultiplyAddGaussianComplex): 3 real-valued MMA operations per
/// complex product instead of 4. Differs from the primary template only in the
/// accumulator fragment iterator (Gaussian variant).
template <
  typename Shape_,
  typename WarpMmaTensorOp_,
  int PartitionsK,
  typename OutputOp_,
  int ElementsPerAccess
>
struct DefaultEpilogueComplexTensorOp <Shape_, WarpMmaTensorOp_, PartitionsK,
  OutputOp_, ElementsPerAccess,
  arch::OpMultiplyAddGaussianComplex
  > {
  using Shape = Shape_;
  using WarpMmaTensorOp = WarpMmaTensorOp_;
  static int const kPartitionsK = PartitionsK;
  using OutputOp = OutputOp_;
  static int const kElementsPerAccess = ElementsPerAccess;
  using Operator = arch::OpMultiplyAddGaussianComplex;

  // Derived element/layout types taken from the output op and the warp-level MMA
  using ElementOutput = typename OutputOp::ElementOutput;
  using LayoutC = typename WarpMmaTensorOp::LayoutC;
  using ElementAccumulator = typename WarpMmaTensorOp::ElementC;

  //
  // Thread map
  //

  /// Maps threads of the threadblock onto the output tile
  using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
    Shape,
    typename WarpMmaTensorOp::Shape,
    kPartitionsK,
    ElementOutput,
    kElementsPerAccess
  >::Type;

  /// Predicated global-memory iterator that writes the final output tile
  using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
    OutputTileThreadMap,
    ElementOutput
  >;

  /// Gaussian-complex variant of the accumulator fragment iterator
  using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorGaussianComplexTensorOp<
    typename WarpMmaTensorOp::Shape,
    typename WarpMmaTensorOp::Policy::Operator::Shape,
    typename WarpMmaTensorOp::Policy::Operator::ElementC,
    typename WarpMmaTensorOp::Policy::Operator::FragmentC,
    LayoutC
  >;

  /// Warp-level iterator used to stage accumulators through shared memory
  using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
    typename WarpMmaTensorOp::Shape,
    typename WarpMmaTensorOp::Policy::Operator::Shape,
    ElementAccumulator,
    LayoutC
  >;

  /// Reads staged accumulators back from shared memory in output order
  using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
    typename OutputTileThreadMap::CompactedThreadMap,
    ElementAccumulator
  >;

  /// Hard-coded padding elements added
  using Padding = cutlass::MatrixShape<0, 0>;

  //
  // Define the epilogue
  //
  using Epilogue = cutlass::epilogue::threadblock::Epilogue<
    Shape,
    WarpMmaTensorOp,
    kPartitionsK,
    OutputTileIterator,
    AccumulatorFragmentIterator,
    WarpTileIterator,
    SharedLoadIterator,
    OutputOp,
    Padding
  >;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h/0 | {
"file_path": "cutlass/include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h",
"repo_id": "cutlass",
"token_count": 2868
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue "iterator" that performs no tiled iteration itself; it only captures
/// the output pointer, extent, stride, and thread/threadblock offsets so a kernel
/// can store results to global memory directly. Row-major output is assumed.
template <typename Element_>
class DirectStoreEpilogueIterator {
public:
  using Element = Element_;

  using Layout = layout::RowMajor;
  using TensorRef = TensorRef<Element, Layout>;
  using ConstTensorRef = typename TensorRef::ConstTensorRef;

  using Index = typename Layout::Index;
  using LongIndex = typename Layout::LongIndex;
  using TensorCoord = MatrixCoord;

  // Scalar (non-vectorized) accesses only
  static int const kElementsPerAccess = 1;

  /// Uses a non-template class
  struct Params : PredicatedTileIteratorParams {
    using Base = PredicatedTileIteratorParams;

    CUTLASS_HOST_DEVICE
    Params() { }

    CUTLASS_HOST_DEVICE
    Params(Layout const &layout) {
      // Row stride stored in bytes; converted back to elements in the iterator ctor
      stride = layout.stride(0) * sizeof(Element);
    }

    CUTLASS_HOST_DEVICE
    Params(Base const &base) :
      Base(base) { }
  };

public:

  //
  // Data members
  //

  Element *pointer; // pointer to the output matrix
  LongIndex stride; // stride in elements between rows
  TensorCoord extent; // extent of output matrix
  int thread_idx; // thread index
  TensorCoord threadblock_offset; // threadblock tile offset passed at construction

public:

  /// Constructor
  CUTLASS_DEVICE
  DirectStoreEpilogueIterator(
    PredicatedTileIteratorParams const & params,
    Element *pointer_,
    TensorCoord extent_,
    int thread_idx_,
    TensorCoord threadblock_offset_ = TensorCoord(),
    int const * indices = nullptr // unused here; presumably kept for signature parity with predicated iterators -- confirm at call sites
  ):
    pointer(pointer_),
    stride(params.stride / sizeof(Element)), // params.stride is in bytes; convert to elements
    extent(extent_),
    thread_idx(thread_idx_),
    threadblock_offset(threadblock_offset_)
  {

  }
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h/0 | {
"file_path": "cutlass/include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h",
"repo_id": "cutlass",
"token_count": 1332
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs.
This does not attempt to target any particular output layout. Instead, each threadblock
streams out its accumulator elements using 128b store operations. This assumes all threadblocks
have unique output tiles.
The target data layout is:
- threadblock indices mapped to linear offsets as (m, n, k), where m is fastest-changing
- threadblock output space partitioned into warps; each warp's region is contiguous
- per-thread accumulators partitioned into 128b accesses
- output memory striped across the threads of a warp
This enables very fast streaming of data, completely limited by the memory system. No predication
or data exchange is performed, and each threadblock is assumed to have a full region of memory
to write to.
This epilogue establishes an upper bound for epilogue performance and is suitable for
reductions across the GEMM K dimension which require a separate workspace.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Epilogue that streams accumulators to a workspace in global memory at full speed.
///
/// No particular output layout is targeted: each thread writes its accumulator
/// fragment using 128b vectorized stores into a region assumed to belong
/// exclusively to its threadblock. No predication or data exchange is performed,
/// which makes this an upper bound on epilogue throughput; it is intended for
/// split-K reductions that accumulate through a separate workspace.
template <
  typename Shape_,     ///< shape of accumulator tile (concept: MatrixShape)
  int WarpCount,       ///< number of warps in the threadblock
  typename FragmentC_  ///< per-thread accumulator fragment (concept: Array)
>
class EpilogueWorkspace {
public:
  using Shape = Shape_;
  using FragmentC = FragmentC_;
  using ElementC = typename FragmentC::value_type;

  static int const kWarpCount = WarpCount;

  /// Optimize for 128b accesses
  static int const kAccessSizeInBits = 128;

  /// Warp size from the perspective of memory operations
  static int const kWarpSize = 32;

  /// Vector length of accesses (elements per 128b store)
  static int const kElementsPerAccess =
    kAccessSizeInBits / sizeof_bits<ElementC>::value;

  /// Number of stores per thread
  static int const kIterations = FragmentC::kElements / kElementsPerAccess;

  static_assert(
    !(FragmentC::kElements % kElementsPerAccess),
    "The number of accumulators must be divisible by the access size.");

  /// Total number of vectorized accesses in warp (in units of vector)
  static int const kWarpAccesses = kIterations * kWarpSize;

  /// Total number of vectorized accesses in threadblock tile (in units of vector)
  static int const kThreadblockAccesses = kWarpAccesses * kWarpCount;

  /// Parameters structure
  struct Params {
    /// Pointer to C matrix
    ElementC *ptr_C;

    /// Stride between tiles along the GEMM N dimension (in units of vectors)
    int stride_n;

    /// Stride between tiles along the GEMM K dimension (in units of vectors)
    int stride_k;

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params(
      ElementC *ptr_C, ///< Pointer to C matrix
      int stride_n_,   ///< Stride between tiles along the GEMM N dimension (in units of ElementC)
      int stride_k_    ///< Stride between tiles along the GEMM K dimension (in units of ElementC)
    ):
      // Strides are supplied in elements and stored in vectors
      ptr_C(ptr_C), stride_n(stride_n_ / kElementsPerAccess), stride_k(stride_k_ / kElementsPerAccess) {
    }
  };

  /// Shared storage allocation needed by the epilogue
  struct SharedStorage {
    // Intentionally empty
  };

private:

  /// 128b-aligned vector of accumulator elements
  struct alignas((kAccessSizeInBits / 8)) AccessType {
    Array<ElementC, kElementsPerAccess> storage;
  };

  /// Vectorized view of the output, offset per thread in the constructor
  AccessType *pointer_;

  /// Stride between tiles along the n dimension (in vectors)
  int stride_n_;

  /// Stride between tiles along the k dimension (in vectors)
  int stride_k_;

public:

  /// Constructor
  CUTLASS_DEVICE
  EpilogueWorkspace(
    Params const &params, ///< Host-constructable params object (fixed: was garbled '&para;ms' HTML-entity mojibake)
    SharedStorage &,      ///< Shared storage object (unused)
    int warp_idx,         ///< ID of warp within threadblock
    int lane_idx          ///< Id of thread within warp
  ):
    pointer_(reinterpret_cast<AccessType *>(params.ptr_C)),
    stride_n_(params.stride_n),
    stride_k_(params.stride_k) {

    // Add per-thread offset: lanes are striped within a warp's contiguous region
    pointer_ += lane_idx + warp_idx * kWarpAccesses;
  }

  /// Streams the result to global memory
  CUTLASS_DEVICE
  void operator()(
    cutlass::gemm::GemmCoord problem_size,  ///< Problem size of GEMM (units of ElementC); unused here
    cutlass::gemm::GemmCoord tb_tile_coord, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
    FragmentC const &accum) {               ///< Accumulator tile

    // Compute offset for entire threadblock (note, per-thread offset has been folded in already)
    AccessType *pointer = pointer_ +
      tb_tile_coord.m() * kThreadblockAccesses +
      tb_tile_coord.n() * stride_n_ +
      tb_tile_coord.k() * stride_k_;

    // Cast to vectorized view of accumulator fragments
    AccessType const * src_pointer = reinterpret_cast<AccessType const *>(&accum);

    // Write out accumulators at full speed; consecutive iterations are kWarpSize
    // vectors apart so lanes of a warp store to consecutive addresses
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < kIterations; ++i) {
      pointer[i * kWarpSize] = src_pointer[i];
    }
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/epilogue/threadblock/epilogue_workspace.h/0 | {
"file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_workspace.h",
"repo_id": "cutlass",
"token_count": 2162
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief PredicatedTileIteratorPredicates.
PredicatedTileIteratorPredicates enables both upper and lower bounds for predicates.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator predicates used to bound computations in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_ ///< Element data type
>
class PredicatedTileIteratorPredicates {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index lower_extent_row_;
Index upper_extent_row_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// Internal state counter
int state_[3];
//
// Static asserts about internal strides
//
static_assert(sizeof(lower_extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(upper_extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorPredicates(
PredicatedTileIteratorParams const & params,
TensorCoord lower_extent,
TensorCoord upper_extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
lower_extent_row_ = lower_extent.row();
upper_extent_row_ = upper_extent.row();
thread_start_row_ = thread_offset.row();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < upper_extent.column()) &&
((thread_offset.column() + ThreadMap::Delta::kColumn * c) >= lower_extent.column());
}
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorPredicates &operator++() {
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
///< Gets lower_extent_row_
CUTLASS_DEVICE Index get_lower_extent_row() {
return lower_extent_row_;
}
///< Gets upper_extent_row_
CUTLASS_DEVICE Index get_upper_extent_row() {
return upper_extent_row_;
}
///< Gets thread_start_row_
CUTLASS_DEVICE Index get_thread_start_row() {
return thread_start_row_;
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h/0 | {
"file_path": "cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h",
"repo_id": "cutlass",
"token_count": 3058
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
#include "cutlass/epilogue/warp/volta_tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory.
///
/// Only the partial specializations below are defined; the primary template is
/// intentionally left without a definition so unsupported configurations fail
/// to compile.
template <
  typename WarpShape,            ///< shape of warp-level GEMM (concept: MatrixShape)
  typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape)
  typename ElementC,             ///< Accumulator element data type
  typename Layout                ///< target shared memory layout
>
struct TileIteratorVoltaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory.
///
/// Partial specialization for half-precision accumulators of the Volta tensor
/// core (32x32x4 interleaved instruction tile), targeting row-major shared memory.
template <
  typename WarpShape_     ///< shape of warp-level GEMM (concept: MatrixShape)
>
struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> {
public:

  using WarpShape = WarpShape_;
  using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
  using Element = half_t;
  using Layout = layout::RowMajor;

  using TensorRef = TensorRef<Element, Layout>;        ///< Tensor Reference object
  using TensorCoord = MatrixCoord;                     ///< Logical coordinate in referenced tensor
  using Index = typename TensorRef::Index;
  using LongIndex = typename TensorRef::LongIndex;

  using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>;

  /// Shape of the tile in memory
  using Shape = MatrixShape<
    Policy::kRowsPerIteration,
    WarpShape::kN
  >;

  /// Array type for aligned memory accesses
  using AccessType = typename Policy::AccessType;

  /// This is the fragment size produced by one access of the iterator.
  using Fragment = typename Policy::Fragment;

  /// This is the complete warp-level accumulator tile.
  using AccumulatorTile = typename Policy::AccumulatorTile;

  /// Number of times this iterator can be incremented
  static int const kIterations = Policy::kIterations;

  /// Number of elements per access
  static int const kElementsPerAccess = Policy::kElementsPerAccess;

  // Internal constants describing the quad-level thread arrangement of a Volta warp
  struct Detail {
    static int const kLanesInQuad = 4;
    static int const kRowsPerQuad = 4;
    static int const kColumnsPerQuad = 8;
    static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess;
    static int const kAccessQuadDelta = 16;
  };

  /// Padding quantity
  using Padding = MatrixShape<
    0,
    Policy::kElementsPerAccess>;

private:

  //
  // Data members
  //

  /// Internal pointer to memory
  AccessType *pointer_;

  /// Internal layout object
  Layout layout_;

public:

  /// Default constructor
  CUTLASS_HOST_DEVICE
  TileIteratorVoltaTensorOp(): pointer_(nullptr) { }

  /// Constructor from TensorRef
  CUTLASS_DEVICE
  TileIteratorVoltaTensorOp(
    TensorRef const &ref,   ///< reference to the shared-memory tile
    unsigned lane_id        ///< lane index within the warp
  ):
    pointer_(reinterpret_cast<AccessType *>(ref.data())),
    layout_(ref.stride()[0] / Policy::kElementsPerAccess) {

    int quad_id = lane_id / Detail::kLanesInQuad;
    int lane_in_quad = (lane_id % Detail::kLanesInQuad);

    // Decompose the quad index into its row/column position within the warp tile
    int quad_row_idx = ((quad_id & 4) >> 1) + (quad_id & 1);
    int quad_col_idx = ((quad_id & 2) >> 1);

    int row = quad_row_idx * Detail::kRowsPerQuad + lane_in_quad;
    int column = quad_col_idx * Detail::kColumnsPerQuad;

    pointer_ += layout_({row, column / kElementsPerAccess});
  }

  /// Adds a pointer offset (in units of elements)
  CUTLASS_HOST_DEVICE
  TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) {
    pointer_ += pointer_offset / Policy::kElementsPerAccess;
    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_HOST_DEVICE
  TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {

    pointer_ += layout_({
      tile_offset.row() * Shape::kRow,
      tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess});

    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_HOST_DEVICE
  TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) {
    add_tile_offset(tile_offset);
    return *this;
  }

  /// Store with an additional pointer offset (in units of elements)
  CUTLASS_DEVICE
  void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
    AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) {

      CUTLASS_PRAGMA_UNROLL
      for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) {

        int access_quad = access_idx / 2;
        int access = access_idx % 2;

        int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess +
          access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess +
          access + pointer_offset / Policy::kElementsPerAccess;

        int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx;

        AccessType access_vector = frag_ptr[frag_idx];

        pointer_[ptr_offset] = access_vector;
      }
    }
  }

  /// Store
  CUTLASS_HOST_DEVICE
  void store(Fragment const &frag) {
    store_with_pointer_offset(frag, 0);
  }

  /// Load with an additional pointer offset (in units of elements).
  ///
  /// The fragment is taken by mutable reference because it is written to; the
  /// previous const-reference signature could never be instantiated since the
  /// internal reinterpret_cast would have discarded constness. The addressing
  /// mirrors store_with_pointer_offset(); the previous body referenced the
  /// undefined constant Detail::kTileDelta and omitted the elements-per-access
  /// scaling of the quad delta.
  CUTLASS_HOST_DEVICE
  void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
    AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) {

      CUTLASS_PRAGMA_UNROLL
      for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) {

        int access_quad = access_idx / 2;
        int access = access_idx % 2;

        int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess +
          access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess +
          access + pointer_offset / Policy::kElementsPerAccess;

        int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx;

        frag_ptr[frag_idx] = pointer_[ptr_offset];
      }
    }
  }

  /// Load
  CUTLASS_HOST_DEVICE
  void load(Fragment &frag) {
    load_with_pointer_offset(frag, 0);
  }

  /// Set smem base address
  CUTLASS_HOST_DEVICE
  void set_smem_base_address(Index address) {
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory.
///
/// Partial specialization for single-precision accumulators of the Volta tensor
/// core (32x32x4 interleaved instruction tile), targeting row-major shared
/// memory. Only the store path is implemented; load asserts.
template <
  typename WarpShape_     ///< shape of warp-level GEMM (concept: MatrixShape)
>
struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> {
public:

  using WarpShape = WarpShape_;
  using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
  using Element = float;
  using Layout = layout::RowMajor;

  using TensorRef = TensorRef<Element, Layout>;        ///< Tensor Reference object
  using TensorCoord = MatrixCoord;                     ///< Logical coordinate in referenced tensor
  using Index = typename TensorRef::Index;
  using LongIndex = typename TensorRef::LongIndex;

  using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>;

  /// Shape of the tile in memory
  using Shape = MatrixShape<
    Policy::kRowsPerIteration,
    WarpShape::kN
  >;

  /// Array type for aligned memory accesses
  using AccessType = typename Policy::AccessType;

  /// This is the fragment size produced by one access of the iterator.
  using Fragment = typename Policy::Fragment;

  /// This is the complete warp-level accumulator tile.
  using AccumulatorTile = typename Policy::AccumulatorTile;

  /// Number of times this iterator can be incremented
  static int const kIterations = Policy::kIterations;

  /// Number of elements per access
  static int const kElementsPerAccess = Policy::kElementsPerAccess;

  // Internal constants describing the quad-level thread arrangement of a Volta warp
  struct Detail {
    static int const kLanesInQuad = 4;
    static int const kRowsPerQuad = 4;
    static int const kColumnsPerQuad = 8;
    static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess;
    static int const kAccessQuadDelta = 16;
  };

  /// Padding quantity
  using Padding = MatrixShape<
    0,
    Policy::kElementsPerAccess>;

private:

  //
  // Data members
  //

  /// Internal pointer to memory
  AccessType *pointer_;

  /// Internal layout object
  Layout layout_;

public:

  /// Default constructor
  CUTLASS_HOST_DEVICE
  TileIteratorVoltaTensorOp(): pointer_(nullptr) { }

  /// Constructor from TensorRef
  CUTLASS_DEVICE
  TileIteratorVoltaTensorOp(
    TensorRef const &ref,   ///< reference to the shared-memory tile
    unsigned lane_id        ///< lane index within the warp
  ):
    pointer_(reinterpret_cast<AccessType *>(ref.data())),
    layout_(ref.stride()[0] / Policy::kElementsPerAccess) {

    int quad_id = lane_id / Detail::kLanesInQuad;
    int lane_in_quad = (lane_id % Detail::kLanesInQuad);

    int const kQuadRowDelta = 4;
    int const kQuadColumnDelta = 2 * Policy::MmaIterations::kColumn;

    // Decompose quad and lane indices into this thread's starting coordinate
    int quad_row_offset = ((quad_id & 4) / 2 + (quad_id & 1)) * kQuadRowDelta;
    int quad_column_offset = (quad_id & 2) / 2 * kQuadColumnDelta;

    int thread_row_offset = (lane_in_quad & 1);
    int thread_column_offset = (lane_in_quad & 2) / 2;

    int row = quad_row_offset + thread_row_offset;
    int column = quad_column_offset + thread_column_offset;

    pointer_ += layout_({row, column});
  }

  /// Adds a pointer offset (in units of elements)
  CUTLASS_HOST_DEVICE
  TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) {
    pointer_ += pointer_offset / Policy::kElementsPerAccess;
    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_HOST_DEVICE
  TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {

    pointer_ += layout_({
      tile_offset.row() * Shape::kRow,
      tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess});

    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_HOST_DEVICE
  TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) {
    add_tile_offset(tile_offset);
    return *this;
  }

  /// Store with an additional pointer offset (in units of elements)
  CUTLASS_DEVICE
  void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
    AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);

    int const kAccessesPerRow = Policy::TileIterations::kColumn * Policy::MmaIterations::kColumn * 2;

    CUTLASS_PRAGMA_UNROLL
    for (int row_idx = 0; row_idx < Policy::kRowsPerMmaTile; ++row_idx) {

      CUTLASS_PRAGMA_UNROLL
      for (int access_idx = 0; access_idx < kAccessesPerRow; ++access_idx) {

        int frag_idx = row_idx * kAccessesPerRow + access_idx;

        // Bits of access_idx select the interleaved column position of the access
        int ptr_column_offset = (access_idx & 1) * 2 +
          (access_idx & 2) * Policy::MmaIterations::kColumn * 2 +
          (access_idx & 4) * Policy::MmaIterations::kColumn * 2;

        int ptr_row_offset = row_idx * 2;

        int ptr_offset = layout_({ptr_row_offset, ptr_column_offset}) + pointer_offset / Policy::kElementsPerAccess;

        pointer_[ptr_offset] = frag_ptr[frag_idx];
      }
    }
  }

  /// Store
  CUTLASS_HOST_DEVICE
  void store(Fragment const &frag) {
    store_with_pointer_offset(frag, 0);
  }

  /// Load with an additional pointer offset -- not implemented for this
  /// specialization.
  ///
  /// The fragment is taken by mutable reference for consistency with the half_t
  /// specialization (a load must write into the fragment). The previous body
  /// formed an ill-formed reinterpret_cast that discarded constness and left
  /// the resulting pointer unused before asserting.
  CUTLASS_HOST_DEVICE
  void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
    (void)frag;
    (void)pointer_offset;
    assert(0);
  }

  /// Load -- not implemented for this specialization.
  CUTLASS_HOST_DEVICE
  void load(Fragment &frag) {
    load_with_pointer_offset(frag, 0);
  }

  /// Set smem base address
  CUTLASS_HOST_DEVICE
  void set_smem_base_address(Index address) {
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h/0 | {
"file_path": "cutlass/include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h",
"repo_id": "cutlass",
"token_count": 4836
} | 33 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/transform/collective/sm90_wgmma_transpose.hpp"
#include "cutlass/trace.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
// WarpSpecialized Mainloop
template <
int Stages,
class ClusterShape_,
class TileShape_,
class KernelSchedule,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm90CpAsyncGmmaRmemAWarpSpecialized<Stages,ClusterShape_,KernelSchedule>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90CpAsyncGmmaRmemAWarpSpecialized<Stages,ClusterShape_,KernelSchedule>;
using TileShape = TileShape_;
using ClusterShape = ClusterShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{}));
// Swap and transpose A/B for A k-major layout and B mn-major layout since WGMMA is k-major only (e.g. tf32, Fp32, Int8, Fp8 WGMMA)
static constexpr bool IsLayoutAkBmn =
cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::RowMajor> &&
cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>;
static constexpr bool IsInputSizeTwoBytes = sizeof(ElementA) == 2 && sizeof(ElementB) == 2;
static constexpr bool SwapAB = !IsInputSizeTwoBytes && IsLayoutAkBmn;
using InternalGmemTiledCopyA = cute::conditional_t<!SwapAB, GmemTiledCopyA, GmemTiledCopyB>;
using InternalGmemTiledCopyB = cute::conditional_t<!SwapAB, GmemTiledCopyB, GmemTiledCopyA>;
using InternalSmemLayoutAtomA = cute::conditional_t<!SwapAB, SmemLayoutAtomA, SmemLayoutAtomB>;
using InternalSmemLayoutAtomB = cute::conditional_t<!SwapAB, SmemLayoutAtomB, SmemLayoutAtomA>;
using InternalSmemCopyAtomA = cute::conditional_t<!SwapAB, SmemCopyAtomA, SmemCopyAtomB>;
using InternalSmemCopyAtomB = cute::conditional_t<!SwapAB, SmemCopyAtomB, SmemCopyAtomA>;
// TMA converts f32 input to tf32 when copying from GMEM to SMEM
// For all other types, cast to size equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>;
static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>;
using ConvertedElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>;
using ConvertedElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>;
using InternalElementA = cute::conditional_t<!SwapAB, ConvertedElementA, ConvertedElementB>;
using InternalElementB = cute::conditional_t<!SwapAB, ConvertedElementB, ConvertedElementA>;
using InternalStrideA = cute::conditional_t<!SwapAB, StrideA, StrideB>;
using InternalStrideB = cute::conditional_t<!SwapAB, StrideB, StrideA>;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
using MainloopPipeline = cutlass::PipelineAsync<DispatchPolicy::Stages>;
using PipelineState = typename MainloopPipeline::PipelineState;
using PipelineParams = typename MainloopPipeline::Params;
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
InternalSmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
using SmemLayoutB = decltype(tile_to_shape(
InternalSmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
// If A mn-layout and B mn-layout, transposing B matrix since WGMMA is k-major only (e.g. tf32, fp32, fp8, int8).
static constexpr bool IsLayoutAmnBmn =
cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::ColumnMajor> &&
cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>;
static constexpr bool TransposeB = !IsInputSizeTwoBytes && IsLayoutAmnBmn;
using TransposeOperandB = decltype(cutlass::transform::collective::detail::make_transpose_operand_b(
0, 0, TiledMma{}, SmemLayoutB{}, InternalSmemLayoutAtomB{},
InternalElementB{}, cute::bool_constant<TransposeB>{}));
static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(not cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source A from rmem and B operand from smem_desc for this mainloop.");
using GmmaSmemLayoutAtomB = decltype(transform::collective::detail::gmma_smem_transpose_or_passthrough<
TransposeB, InternalSmemLayoutAtomB, InternalElementB>());
// SmemLayoutB for GMMA is different from SmemLayoutB for TMA if TransposeB
using GmmaSmemLayoutB = decltype(tile_to_shape(
GmmaSmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
static_assert(!SwapAB || !TransposeB, "Cannot SwapAB and TransposeB at the same time.");
static_assert(TransposeB xor (cute::is_same_v<SmemLayoutB, GmmaSmemLayoutB>),
"Should be same layout if not TransposeB.");
static_assert(!TransposeB || (cutlass::bits_to_bytes(size<1>(SmemLayoutB{}) * sizeof_bits<InternalElementB>::value)) == 128,
"SmemLayoutB K must be 128bytes to be transposed.");
static_assert(!transform::collective::detail::use_universal_transposition<InternalSmemLayoutAtomB, InternalElementB>(),
"Warp specialized ARF kernels have not supported universal B transposition yet.");
  /// Shared memory required by this collective: operand tiles plus pipeline barriers.
  struct SharedStorage
  {
    // Operand tiles in shared memory; 256-byte alignment on the struct and on
    // each array.
    struct TensorStorage : cute::aligned_struct<256> {
      cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>, 256> smem_A;
      cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>, 256> smem_B;
    } tensors;

    using PipelineStorage = typename MainloopPipeline::SharedStorage;
    PipelineStorage pipeline;
  };
  using TensorStorage = typename SharedStorage::TensorStorage;
  using PipelineStorage = typename SharedStorage::PipelineStorage;

  // Host side kernel arguments (operands as supplied by the caller; A/B may be
  // swapped when converting to Params — see to_underlying_arguments)
  struct Arguments {
    ElementA const* ptr_A = nullptr;
    StrideA dA{};
    ElementB const* ptr_B = nullptr;
    StrideB dB{};
    uint32_t mma_promotion_interval = 4;
  };

  // Device side kernel params (internal element types and possibly swapped operands)
  struct Params {
    InternalElementA const* ptr_A = nullptr;
    InternalStrideA dA{};
    InternalElementB const* ptr_B = nullptr;
    InternalStrideB dB{};
    uint32_t mma_promotion_interval = 4;
  };
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
[[maybe_unused]] ProblemShape const& problem_shape,
Arguments const& args,
[[maybe_unused]] void* workspace) {
if constexpr (not SwapAB) {
return {
reinterpret_cast<InternalElementA const*>(args.ptr_A),
args.dA,
reinterpret_cast<InternalElementB const*>(args.ptr_B),
args.dB
};
}
else {
return {
reinterpret_cast<InternalElementA const*>(args.ptr_B),
args.dB,
reinterpret_cast<InternalElementB const*>(args.ptr_A),
args.dA
};
}
}
template<class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyA::NumValSrc>(cute::make_shape(M,K,L), StrideA{});
implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyB::NumValSrc>(cute::make_shape(N,K,L), StrideB{});
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
  static constexpr int K_PIPE_MAX = DispatchPolicy::Stages;   // depth of the smem stage ring buffer
  static constexpr int K_PIPE_MMAS = 1;                       // number of in-flight MMA stages

  /// Perform a collective-scoped matrix multiply-accumulate
  /// Producer Perspective
  ///
  /// Streams A and B k-tiles from global memory into the shared-memory ring
  /// buffer. M/N residues are predicated on every stage; the K residue is
  /// handled only in the 0th stage because the input tensors are shifted so
  /// that only the first k-tile is partial.
  template <
    class TensorA,
    class TensorB,
    class KTileIterator,
    class ResidueMNK
  >
  CUTLASS_DEVICE void
  load(
      MainloopPipeline pipeline,        ///< producer/consumer pipeline
      PipelineState smem_pipe_write,    ///< current producer (write) pipeline state
      TensorA const& gA_in,             ///< gmem tensor for operand A
      TensorB const& gB_in,             ///< gmem tensor for operand B
      KTileIterator k_tile_iter, int k_tile_count,
      ResidueMNK residue_mnk,           ///< (m, n, k) residues for boundary predication
      int thread_idx,
      TensorStorage& shared_tensors)
  {
    using namespace cute;

    static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
    static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");

    Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{});          // (BLK_M,BLK_K,PIPE)
    Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{});          // (BLK_N,BLK_K,PIPE)

    // Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k)
    // This aligns the tensor with BLK_K for all but the 0th k_tile
    Tensor gA = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gA_in);
    Tensor gB = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gB_in);

    // Partition the copying of A and B tiles across the threads
    InternalGmemTiledCopyA gmem_tiled_copy_a;
    InternalGmemTiledCopyB gmem_tiled_copy_b;
    auto gmem_thr_copy_a = gmem_tiled_copy_a.get_slice(thread_idx);
    auto gmem_thr_copy_b = gmem_tiled_copy_b.get_slice(thread_idx);

    Tensor tAgA = gmem_thr_copy_a.partition_S(gA);                        // (ACPY,ACPY_M,ACPY_K,k)
    Tensor tAsA = gmem_thr_copy_a.partition_D(sA);                        // (ACPY,ACPY_M,ACPY_K,PIPE)
    Tensor tBgB = gmem_thr_copy_b.partition_S(gB);                        // (BCPY,BCPY_N,BCPY_K,k)
    Tensor tBsB = gmem_thr_copy_b.partition_D(sB);                        // (BCPY,BCPY_N,BCPY_K,PIPE)

    // Allocate predicate tensors for m and n; the stride-0 second mode
    // broadcasts each predicate across the K dimension of the tile
    Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{});
    Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{});

    // Construct identity layout for sA and sB
    Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA)));    // (BLK_M,BLK_K) -> (blk_m,blk_k)
    Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB)));    // (BLK_N,BLK_K) -> (blk_n,blk_k)

    // Repeat the partitioning with identity layouts
    Tensor tAcA = gmem_thr_copy_a.partition_S(cA);                             // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
    Tensor tBcB = gmem_thr_copy_b.partition_S(cB);                             // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)

    // Set predicates for m bounds
    CUTLASS_PRAGMA_UNROLL
    for (int m = 0; m < size<0>(tApA); ++m) {
      tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk);  // blk_m coord < residue_m
    }
    // Set predicates for n bounds
    CUTLASS_PRAGMA_UNROLL
    for (int n = 0; n < size<0>(tBpB); ++n) {
      tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk);  // blk_n coord < residue_n
    }

    // 0-th stage with predication on k to account for residue
    {
      // LOCK smem_pipe_write for _writing_
      pipeline.producer_acquire(smem_pipe_write);
      int write_stage = smem_pipe_write.index();

      // Copy gmem to smem for *k_tile_iter, predicating for k residue
      Tensor tAgAk = tAgA(_,_,_,*k_tile_iter);
      CUTLASS_PRAGMA_UNROLL
      for (int k = 0; k < size<2>(tAsA); ++k) {
        if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) {      // blk_k coord < residue_k (gA shifted)
          copy_if(gmem_tiled_copy_a, tApA(_,k), tAgAk(_,_,k), tAsA(_,_,k,write_stage));
        }
        else {
          // Out-of-bounds k slices are zero-filled so the consumer reads zeros
          clear(tAsA(_,_,k,write_stage));
        }
      }
      Tensor tBgBk = tBgB(_,_,_,*k_tile_iter);
      CUTLASS_PRAGMA_UNROLL
      for (int k = 0; k < size<2>(tBsB); ++k) {
        if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) {      // blk_k coord < residue_k (gB shifted)
          copy_if(gmem_tiled_copy_b, tBpB(_,k), tBgBk(_,_,k), tBsB(_,_,k,write_stage));
        }
        else {
          clear(tBsB(_,_,k,write_stage));
        }
      }
      ++k_tile_iter;
      --k_tile_count;

      // UNLOCK smem_pipe_write
      pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive);

      // Advance smem_pipe_write
      ++smem_pipe_write;
    }

    // Mainloop: all remaining k-tiles are full, so only m/n predication is needed
    CUTLASS_PRAGMA_NO_UNROLL
    for ( ; k_tile_count > 0; --k_tile_count) {
      // LOCK smem_pipe_write for _writing_
      pipeline.producer_acquire(smem_pipe_write);
      int write_stage = smem_pipe_write.index();

      // Copy gmem to smem for *k_tile_iter
      copy_if(gmem_tiled_copy_a, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage));
      copy_if(gmem_tiled_copy_b, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage));
      ++k_tile_iter;

      // UNLOCK smem_pipe_write
      pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive);

      // Advance smem_pipe_write
      ++smem_pipe_write;
    }
  }
/// Producer epilogue: drain the pipeline to prevent early exit of blocks in a
/// Cluster. Blocks until every stage has either been released by a consumer
/// UNLOCK, or — if a stage was never used — is immediately acquirable because
/// its phase is still inverted from make_producer_start_state.
CUTLASS_DEVICE void
load_tail(MainloopPipeline pipeline, PipelineState smem_pipe_write) {
  // Issue the epilogue waits on every remaining stage.
  pipeline.producer_tail(smem_pipe_write);
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Consumer Perspective
///
/// Consumes k_tile_count SMEM stages filled by the producer (load), and
/// accumulates into the register-resident fragment `accum`. The A operand is
/// staged SMEM->RF through a copy atom (RS mainloop); the B operand is read
/// by GMMA directly from SMEM — after an in-SMEM transpose when TransposeB
/// is enabled.
template <
  class FrgTensorC
>
CUTLASS_DEVICE void
mma(MainloopPipeline pipeline,
    PipelineState smem_pipe_read,
    FrgTensorC& accum,
    int k_tile_count,
    int thread_idx,
    TensorStorage& shared_tensors,
    Params const& mainloop_params)
{
  using namespace cute;
  static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.")
  ;
  static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
  static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
  static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "InternalSmemLayoutAtomA must be rank 2.");
  static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "InternalSmemLayoutAtomB must be rank 2.");
  // A is register-sourced, so it requires a real SMEM->RF copy atom; B is
  // consumed by GMMA straight from SMEM and must not have one.
  static_assert(!cute::is_void_v<InternalSmemCopyAtomA>,
    "SM90 GMMA mainloops must specify a non-void copy atom for smem sourced instructions.");
  static_assert(cute::is_void_v<InternalSmemCopyAtomB>,
    "SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
  // Obtain warp index
  int warp_idx = canonical_warp_idx_sync();
  [[maybe_unused]] int warp_group_thread_idx = thread_idx % 128;
  Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
  Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE)
  Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
  Tensor sB = as_position_independent_swizzle_tensor(sB_); // (BLK_N,BLK_K,PIPE)
  // If TransposeB, GMMA will read from transposed B layout SMEM
  Tensor gmma_sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), GmmaSmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
  //
  // Define C accumulators and A/B partitioning
  //
  TiledMma tiled_mma;
  auto thread_mma = tiled_mma.get_thread_slice(thread_idx);
  // Allocate fragments and descriptors
  Tensor tCsA = thread_mma.partition_A(sA);
  Tensor tCrA = thread_mma.partition_fragment_A(sA(_,_,Int<0>{})); // (MMA,MMA_M,MMA_K,PIPE)
  Tensor tCsB = thread_mma.partition_B(gmma_sB); // (MMA,MMA_N,MMA_K,PIPE)
  Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
  //
  // Copy Atom A retiling — retile the A register fragment to match the
  // SMEM->RF copy atom's value layout.
  //
  auto smem_tiled_copy_A = make_tiled_copy_A(InternalSmemCopyAtomA{}, tiled_mma);
  auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx);
  Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K)
  CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
  CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
  CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
  CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
  CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
  CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
  CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
  CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
  //
  // PIPELINED MAIN LOOP
  //
  static_assert((0 <= K_PIPE_MMAS) && (K_PIPE_MMAS < K_PIPE_MAX),
      "ERROR : Incorrect number of MMAs in flight");
  // We release buffers to producer warps(dma load) with some mmas in flight
  PipelineState smem_pipe_release = smem_pipe_read;
  // First GMMA of the k-tile overwrites the accumulator (ScaleOut::Zero);
  // every subsequent GMMA accumulates (ScaleOut::One).
  tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
  TransposeOperandB transpose = cutlass::transform::collective::detail::make_transpose_operand_b(
      warp_idx, warp_group_thread_idx, tiled_mma, SmemLayoutB{},
      InternalSmemLayoutAtomB{}, InternalElementB{},
      cute::bool_constant<TransposeB>{});
  warpgroup_fence_operand(accum);
  // first k tile (peeled so the ScaleOut::Zero -> ::One switch happens exactly
  // once, and the next stage's A-copy / B-transpose can be prefetched)
  {
    pipeline.consumer_wait(smem_pipe_read);
    int read_stage = smem_pipe_read.index();
    ++smem_pipe_read;
    // Probe the next stage now so the wait can be elided later if the
    // producer is already done with it.
    bool skip_wait = (pipeline.consumer_try_wait(smem_pipe_read) == BarrierStatus::WaitDone);
    // copy smem->rmem for A operand
    copy(smem_tiled_copy_A, tCsA(_,_,0,read_stage), tCrA_copy_view(_,_,0));
    // transpose B operand in SMEM
    transpose(sB, gmma_sB, read_stage, 0);
    // Unroll the K mode manually to set scale D to 1
    CUTLASS_PRAGMA_UNROLL
    for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) {
      // Prefetch A for the next k_block while this one computes.
      copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
      if (k_block == 0) {
        transpose(sB, gmma_sB, read_stage, 1);
        transpose.synchronize();
      }
      warpgroup_arrive();
      // (V,M) x (V,N) => (V,M,N)
      cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
      tiled_mma.accumulate_ = GMMA::ScaleOut::One;
      warpgroup_commit_batch();
    }
    // Allow at most two committed GMMA batches to remain in flight.
    warpgroup_wait<2>();
    if (k_tile_count - 1 > 0) {
      if (!skip_wait) {
        pipeline.consumer_wait(smem_pipe_read);
      }
      // Prefetch the next stage's first A block and B transpose.
      copy(smem_tiled_copy_A, tCsA(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0));
      transpose(sB, gmma_sB, smem_pipe_read.index(), 0);
    }
    warpgroup_arrive();
    // (V,M) x (V,N) => (V,M,N)
    cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum);
    tiled_mma.accumulate_ = GMMA::ScaleOut::One;
    warpgroup_commit_batch();
    warpgroup_wait<2>();
  }
  warpgroup_fence_operand(accum);
  // Mainloop GMMAs — processes every tile except the first (peeled above) and
  // the last (handled below without a next-stage prefetch).
  --k_tile_count;
  CUTLASS_PRAGMA_NO_UNROLL
  for ( ; k_tile_count > 1; --k_tile_count) {
    //
    // Compute on k_tile
    //
    int read_stage = smem_pipe_read.index();
    ++smem_pipe_read;
    bool skip_wait = (pipeline.consumer_try_wait(smem_pipe_read) == BarrierStatus::WaitDone);
    warpgroup_fence_operand(accum);
    // Unroll the K mode manually to set scale D to 1
    CUTLASS_PRAGMA_UNROLL
    for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
      if (k_block == size<2>(tCrA) - 1) {
        // Last k_block of this stage: move on to prefetching the next stage.
        if (!skip_wait) {
          pipeline.consumer_wait(smem_pipe_read);
        }
        copy(smem_tiled_copy_A, tCsA(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0));
        // transpose B operand in SMEM
        transpose(sB, gmma_sB, smem_pipe_read.index(), 0);
      } else {
        copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
        // transpose B operand in SMEM
        if (k_block < 2) {
          transpose.synchronize(k_block); // make transpose of k_block available
        }
        if (k_block == 0) {
          transpose(sB, gmma_sB, read_stage, 1);
        }
      }
      warpgroup_arrive();
      // (V,M) x (V,N) => (V,M,N)
      cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
      tiled_mma.accumulate_ = GMMA::ScaleOut::One;
      warpgroup_commit_batch();
      warpgroup_wait<2>();
      if (k_block == 1) {
        // release prior barrier
        pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
        ++smem_pipe_release;
      }
    }
    warpgroup_fence_operand(accum);
  }
  warpgroup_fence_operand(accum);
  // Final k tile: same as the mainloop body, but with no next stage to
  // wait on or prefetch.
  if (k_tile_count > 0) {
    //
    // Compute on k_tile
    //
    int read_stage = smem_pipe_read.index();
    warpgroup_fence_operand(accum);
    // Unroll the K mode manually to set scale D to 1
    CUTLASS_PRAGMA_UNROLL
    for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) {
      copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
      if (k_block < 2) {
        transpose.synchronize(k_block); // make k_block transpose available
      }
      if (k_block == 0) {
        transpose(sB, gmma_sB, read_stage, 1);
      }
      warpgroup_arrive();
      // (V,M) x (V,N) => (V,M,N)
      cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
      tiled_mma.accumulate_ = GMMA::ScaleOut::One;
      warpgroup_commit_batch();
      warpgroup_wait<2>();
      if (k_block == 1) {
        // release prior barrier
        pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
        ++smem_pipe_release;
      }
    }
    warpgroup_arrive();
    // (V,M) x (V,N) => (V,M,N)
    cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum);
    tiled_mma.accumulate_ = GMMA::ScaleOut::One;
    warpgroup_commit_batch();
    warpgroup_wait<2>();
    warpgroup_fence_operand(accum);
  }
  warpgroup_fence_operand(accum);
}
/// Consumer epilogue: wait for all outstanding GMMAs to complete, then release
/// every pipeline stage the mainloop still holds back to the producer.
CUTLASS_DEVICE void
mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) {
  // Stages still held by the consumer correspond to the prologue MMAs in flight.
  int const num_releases = min(K_PIPE_MMAS, k_tile_count);
  // Skip past the stages the mainloop already released.
  smem_pipe_release.advance(k_tile_count - num_releases);
  // No GMMA may still be reading SMEM when the buffers are unlocked.
  warpgroup_wait<0>();
  int released = 0;
  while (released < num_releases) {
    pipeline.consumer_release(smem_pipe_release); // UNLOCK: done computing on this stage
    ++smem_pipe_release;
    ++released;
  }
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/collective/sm90_mma_multistage_gmma_rs_warpspecialized.hpp/0 | {
"file_path": "cutlass/include/cutlass/gemm/collective/sm90_mma_multistage_gmma_rs_warpspecialized.hpp",
"repo_id": "cutlass",
"token_count": 12653
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_sparse.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Gemm device-level operator. This is an interface to efficient CUTLASS GEMM kernels that may
be invoked from host code.
The contributions of this class are:
1. At compile time, it maps data types and high-level structural parameters onto
specific CUTLASS components.
2. At runtime, it maps logical arguments to GEMM problems to kernel parameters.
3. At runtime, it launches kernels on the device.
The intent is to provide a convenient mechanism for interacting with most plausible GEMM
configurations for each supported architecture. Consequently, not all parameters are exposed
to the top-level interface. Rather, sensible defaults at each level of the CUTLASS hierarchy
are selected to tradeoff simplicity of the interface with flexibility. We expect
most configurations to be specified at this level. Applications with more exotic requirements
may construct their kernels of interest using CUTLASS components at the threadblock, warp,
and thread levels of abstraction.
CUTLASS exposes computations using the functor design pattern in which objects compose some
internal state with an overloaded function call operator. This enables decoupling of
initialization from execution, possibly reducing overhead during steady state phases of
application execution.
CUTLASS device-level operators expose an Arguments structure encompassing each logical
input to the computation. This is distinct from the kernel-level Params structure pattern
which contains application-specific precomputed state needed by the device code.
Example of a CUTLASS GEMM operator implementing the functionality of cuBLAS's SGEMM NN
is as follows:
//
// Instantiate the CUTLASS GEMM operator.
//
cutlass::gemm::device::Gemm<
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor
> gemm_op;
//
// Launch the GEMM operation on the device
//
cutlass::Status status = gemm_op({
{m, n, k}, // GemmCoord problem_size,
{A, lda}, // TensorRef<float, layout::ColumnMajor> ref_A,
{B, ldb}, // TensorRef<float, layout::ColumnMajor> ref_B,
{C, ldc}, // TensorRef<float, layout::ColumnMajor> ref_C,
{D, ldd}, // TensorRef<float, layout::ColumnMajor> ref_D,
{alpha, beta} // EpilogueOutputOp::Params epilogue_op_params
});
A simplified view of the template is listed below.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages
>
class Gemm;
*/
template <
    /// Element type for A matrix operand
    typename ElementA_,
    /// Layout type for A matrix operand
    typename LayoutA_,
    /// Element type for B matrix operand
    typename ElementB_,
    /// Layout type for B matrix operand
    typename LayoutB_,
    /// Element type for C and D matrix operands
    typename ElementC_,
    /// Layout type for C and D matrix operands
    typename LayoutC_,
    /// Element type for internal accumulation
    typename ElementAccumulator_ = ElementC_,
    /// Operator class tag
    typename OperatorClass_ = arch::OpClassSimt,
    /// Tag indicating architecture to tune for
    typename ArchTag_ = arch::Sm70,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle_ =
        typename threadblock::GemmIdentityThreadblockSwizzle<>,
    /// Number of stages used in the pipelined mainloop
    int Stages =
        DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                                 ElementC_, ElementAccumulator_>::kStages,
    /// Access granularity of A matrix in units of elements
    int AlignmentA =
        DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                                 ElementC_, ElementAccumulator_>::kAlignmentA,
    /// Access granularity of B matrix in units of elements
    int AlignmentB =
        DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                                 ElementC_, ElementAccumulator_>::kAlignmentB,
    /// If true, kernel supports split-K with serial reduction
    bool SplitKSerial = false,
    /// Operation performed by GEMM
    typename Operator_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::Operator>
/// Device-level structured-sparse GEMM operator. In addition to A/B/C/D it
/// takes a metadata tensor E describing the sparsity pattern of A (types and
/// constants for E are derived from the underlying GemmKernel).
class SparseGemm {
 public:
  using ElementA = ElementA_;
  using LayoutA = LayoutA_;
  using TensorRefA = TensorRef<ElementA const, LayoutA>;
  using ElementB = ElementB_;
  using LayoutB = LayoutB_;
  using TensorRefB = TensorRef<ElementB const, LayoutB>;
  using ElementC = ElementC_;
  using LayoutC = LayoutC_;
  using TensorRefC = TensorRef<ElementC const, LayoutC>;
  using TensorRefD = TensorRef<ElementC, LayoutC>;
  using ElementAccumulator = ElementAccumulator_;
  using OperatorClass = OperatorClass_;
  using ArchTag = ArchTag_;
  using ThreadblockShape = ThreadblockShape_;
  using WarpShape = WarpShape_;
  using InstructionShape = InstructionShape_;
  using EpilogueOutputOp = EpilogueOutputOp_;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  using Operator = Operator_;
  using MathOperator = Operator;
  static int const kStages = Stages;
  static int const kAlignmentA = AlignmentA;
  static int const kAlignmentB = AlignmentB;
  static int const kAlignmentC = EpilogueOutputOp::kCount;
  static bool const kSplitKSerial = SplitKSerial;
  static ComplexTransform const kTransformA = ComplexTransform::kNone;
  static ComplexTransform const kTransformB = ComplexTransform::kNone;
  /// Define the kernel
  using GemmKernel = typename kernel::DefaultSparseGemm<
    ElementA,
    LayoutA,
    kAlignmentA,
    ElementB,
    LayoutB,
    kAlignmentB,
    ElementC,
    LayoutC,
    ElementAccumulator,
    OperatorClass,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    kStages,
    kSplitKSerial,
    Operator
  >::GemmKernel;
  // Sparse-metadata (E operand) types and constants, forwarded from the kernel.
  using ElementE = typename GemmKernel::ElementE;
  using LayoutE = typename GemmKernel::LayoutE;
  static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value;
  static int const kSparse = GemmKernel::kSparse;
  static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits;
  static int const kElementsPerElementE = GemmKernel::kElementsPerElementE;
  /// Argument structure: all logical inputs to one GEMM problem.
  struct Arguments {
    //
    // Data members
    //
    GemmCoord problem_size;                        // GEMM problem dimensions (m, n, k)
    TensorRef<ElementA const, LayoutA> ref_A;      // compressed (sparse) A operand
    TensorRef<ElementB const, LayoutB> ref_B;      // dense B operand
    TensorRef<ElementC const, LayoutC> ref_C;      // source accumulator C
    TensorRef<ElementC, LayoutC> ref_D;            // destination D
    TensorRef<ElementE const, LayoutE> ref_E;      // sparsity metadata for A
    typename EpilogueOutputOp::Params epilogue;    // epilogue parameters (e.g. alpha/beta)
    int split_k_slices;                            // number of split-K partitions (>1 requires kSplitKSerial)
    //
    // Methods
    //
    /// Default ctor
    CUTLASS_HOST_DEVICE
    Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
    }
    /// Constructs an Arguments structure
    CUTLASS_HOST_DEVICE
    Arguments(
      GemmCoord problem_size_,
      TensorRef<ElementA const, LayoutA> ref_A_,
      TensorRef<ElementB const, LayoutB> ref_B_,
      TensorRef<ElementC const, LayoutC> ref_C_,
      TensorRef<ElementC, LayoutC> ref_D_,
      TensorRef<ElementE, LayoutE> ref_E_,
      typename EpilogueOutputOp::Params epilogue_ =
        typename EpilogueOutputOp::Params(),
      int split_k_slices = 1
    ):
      problem_size(problem_size_),
      ref_A(ref_A_),
      ref_B(ref_B_),
      ref_C(ref_C_),
      ref_D(ref_D_),
      ref_E(ref_E_),
      epilogue(epilogue_),
      split_k_slices(split_k_slices) {
    }
  };
 private:
  /// Kernel parameters object
  typename GemmKernel::Params params_;
 public:
  /// Constructs the GEMM.
  SparseGemm() { }
  /// Determines whether the GEMM can execute the given problem.
  /// Rejects split-K requests when kSplitKSerial is disabled, then defers the
  /// remaining checks (alignment, problem shape) to the kernel.
  static Status can_implement(Arguments const &args) {
    if (!kSplitKSerial && args.split_k_slices > 1) {
      return Status::kErrorInvalidProblem;
    }
    Status status = GemmKernel::can_implement(
      args.problem_size,
      args.ref_A.non_const_ref(),
      args.ref_B.non_const_ref(),
      args.ref_C.non_const_ref(),
      args.ref_D,
      args.ref_E.non_const_ref()
    );
    if (status != Status::kSuccess) {
      return status;
    }
    return Status::kSuccess;
  }
  /// Gets the workspace size in bytes.
  /// Serial split-K needs one int semaphore per output tile; otherwise no
  /// workspace is required.
  static size_t get_workspace_size(Arguments const &args) {
    size_t bytes = 0;
    // Determine grid shape
    ThreadblockSwizzle threadblock_swizzle;
    cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
      args.problem_size,
      {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
      args.split_k_slices);
    if (kSplitKSerial && args.split_k_slices > 1) {
      bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
    }
    return bytes;
  }
  /// Initializes GEMM state from arguments.
  /// Zeroes the split-K semaphore workspace (if any), builds the kernel
  /// Params, and opts into >48KB dynamic shared memory when the kernel needs it.
  Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
    // Determine grid shape
    ThreadblockSwizzle threadblock_swizzle;
    cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
      args.problem_size,
      {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
      args.split_k_slices);
    if (kSplitKSerial) {
      if (args.split_k_slices > 1) {
        if (!workspace) {
          return Status::kErrorWorkspaceNull;
        }
        // Semaphores must start at zero for the serial reduction ordering.
        size_t bytes = get_workspace_size(args);
        cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
        if (result != cudaSuccess) {
          return Status::kErrorInternal;
        }
      }
    }
    else {
      if (args.split_k_slices > 1) {
        return Status::kErrorInvalidProblem;
      }
    }
    // Initialize the Params structure
    params_ = typename GemmKernel::Params{
      args.problem_size,
      grid_shape,
      args.ref_A.non_const_ref(),
      args.ref_B.non_const_ref(),
      args.ref_C.non_const_ref(),
      args.ref_D,
      args.ref_E.non_const_ref(),
      args.epilogue,
      static_cast<int *>(workspace)
    };
    // Kernels needing more than 48KB of shared memory must request it explicitly.
    int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
    if (smem_size >= (48 << 10)) {
      cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>,
                                    cudaFuncAttributeMaxDynamicSharedMemorySize,
                                    smem_size);
      if (result != cudaSuccess) {
        return Status::kErrorInternal;
      }
    }
    return Status::kSuccess;
  }
  /// Lightweight update given a subset of arguments.
  /// Rebinds pointers and epilogue parameters only; the grid shape computed by
  /// initialize() is kept, so the problem size must not have changed.
  Status update(Arguments const &args, void *workspace = nullptr) {
    if (kSplitKSerial && args.split_k_slices > 1) {
      if (!workspace) {
        return Status::kErrorWorkspaceNull;
      }
    }
    params_.ref_A.reset(args.ref_A.non_const_ref().data());
    params_.ref_B.reset(args.ref_B.non_const_ref().data());
    params_.ref_C.reset(args.ref_C.non_const_ref().data());
    params_.ref_D.reset(args.ref_D.data());
    params_.ref_E.reset(args.ref_E.non_const_ref().data());
    params_.output_op = args.epilogue;
    params_.semaphore = static_cast<int *>(workspace);
    return Status::kSuccess;
  }
  /// Runs the kernel using initialized state.
  Status run(cudaStream_t stream = nullptr) {
    ThreadblockSwizzle threadblock_swizzle;
    dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
    dim3 block(GemmKernel::kThreadCount, 1, 1);
    int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
    cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
    cudaError_t result = cudaGetLastError();
    return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
  }
  /// Runs the kernel using initialized state.
  Status operator()(cudaStream_t stream = nullptr) {
    return run(stream);
  }
  /// Initializes from arguments, then runs the kernel (convenience overload).
  Status operator()(
    Arguments const &args,
    void *workspace = nullptr,
    cudaStream_t stream = nullptr) {
    Status status = initialize(args, workspace, stream);
    if (status == Status::kSuccess) {
      status = run(stream);
    }
    return status;
  }
};
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/device/gemm_sparse.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/device/gemm_sparse.h",
"repo_id": "cutlass",
"token_count": 6306
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a TRMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/gemm/kernel/default_trmm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Trmm device-level operator. This is an interface to efficient CUTLASS TRMM kernels that may
be invoked from host code.
The contributions of this class are:
1. At compile time, it maps data types and high-level structural parameters onto
specific CUTLASS components.
2. At runtime, it maps logical arguments to TRMM problems to kernel parameters.
3. At runtime, it launches kernels on the device.
The intent is to provide a convenient mechanism for interacting with most plausible TRMM
configurations for each supported architecture. Consequently, not all parameters are exposed
to the top-level interface. Rather, sensible defaults at each level of the CUTLASS hierarchy
are selected to tradeoff simplicity of the interface with flexibility. We expect
most configurations to be specified at this level. Applications with more exotic requirements
may construct their kernels of interest using CUTLASS components at the threadblock, warp,
and thread levels of abstraction.
CUTLASS exposes computations using the functor design pattern in which objects compose some
internal state with an overloaded function call operator. This enables decoupling of
initialization from execution, possibly reducing overhead during steady state phases of
application execution.
CUTLASS device-level operators expose an Arguments structure encompassing each logical
input to the computation. This is distinct from the kernel-level Params structure pattern
which contains application-specific precomputed state needed by the device code.
Example of a CUTLASS TRMM operator implementing the functionality of cuBLAS's STRMM NN
is as follows:
//
// Instantiate the CUTLASS TRMM operator.
//
cutlass::gemm::device::Trmm<
float,
cutlass::layout::ColumnMajor,
cutlass::SideMode::kLeft,
cutlass::FillMode::kLower,
cutlass::DiagType::kNonUnit,
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor,
> trmm_op;
//
// Launch the TRMM operation on the device
//
cutlass::Status status = trmm_op({
cutlass::gemm::GemmUniversalMode, // Trmm Problem Mode
{m, n, m/n}, // GemmCoord problem_size (k is based on left- or right-side mode)
batch_count,
{alpha}, // EpilogueOutputOp::Params epilogue_op_params
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int lda,
int ldb,
int ldc
});
A simplified view of the template is listed below.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// DiagType for A (kNonUnit or kUnit)
DiagType DiagTypeA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA
>
class Trmm;
*/
template <
  /// Element type for A matrix operand
  typename ElementA_,
  /// Layout type for A matrix operand
  typename LayoutA_,
  /// Side Mode for A
  SideMode SideModeA,
  /// Fill Mode for A
  FillMode FillModeA,
  /// DiagType for A
  DiagType DiagTypeA,
  /// Element type for B matrix operand
  typename ElementB_,
  /// Layout type for B matrix operand
  typename LayoutB_,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Layout type for C and D matrix operands
  typename LayoutC_,
  /// Element type for internal accumulation
  typename ElementAccumulator_ = ElementC_,
  /// Operator class tag
  typename OperatorClass_ = arch::OpClassTensorOp,
  /// Tag indicating architecture to tune for
  typename ArchTag_ = arch::Sm80,
  /// Threadblock-level tile size (concept: GemmShape)
  typename ThreadblockShape_ = typename DefaultGemmConfiguration<
      OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
      ElementAccumulator_>::ThreadblockShape,
  /// Warp-level tile size (concept: GemmShape)
  typename WarpShape_ = typename DefaultGemmConfiguration<
      OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
      ElementAccumulator_>::WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
  typename InstructionShape_ = typename DefaultGemmConfiguration<
      OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
      ElementAccumulator_>::InstructionShape,
  /// Epilogue output operator
  typename EpilogueOutputOp_ = epilogue::thread::LinearCombination<
    ElementC_,
    128 / sizeof_bits<ElementC_>::value,
    ElementAccumulator_,
    ElementAccumulator_,
    epilogue::thread::ScaleType::OnlyAlphaScaling
  >,
  /// Threadblock-level swizzling operator
  typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
  /// Number of stages used in the pipelined mainloop
  int Stages =
      DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                               ElementC_, ElementAccumulator_>::kStages,
  /// Access granularity of A matrix in units of elements
  int AlignmentA =
      DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                               ElementC_, ElementAccumulator_>::kAlignmentA,
  /// Access granularity of B matrix in units of elements
  int AlignmentB =
      DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                               ElementC_, ElementAccumulator_>::kAlignmentB,
  /// If true, kernel supports split-K with serial reduction
  bool SplitKSerial = false,
  /// Operation performed by TRMM
  typename Operator_ = typename DefaultGemmConfiguration<
      OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
      ElementAccumulator_>::Operator,
  /// Complex elementwise transformation on A operand
  ComplexTransform TransformA = ComplexTransform::kNone>
class Trmm {
 public:

  using ElementA = ElementA_;
  using LayoutA = LayoutA_;
  using TensorRefA = TensorRef<ElementA const, LayoutA>;

  // For RightSide mode, the kernel-facing A/B operands are swapped relative to the
  // user-facing ones (the triangular matrix is always passed to the kernel as `A`).
  using ElementAKernel = typename platform::conditional<(SideModeA == SideMode::kRight), ElementB_, ElementA_>::type;
  using LayoutAKernel = typename platform::conditional<(SideModeA == SideMode::kRight), LayoutB_, LayoutA_>::type;
  using ElementB = ElementB_;
  using LayoutB = LayoutB_;
  using TensorRefB = TensorRef<ElementB const, LayoutB>;
  using ElementBKernel = typename platform::conditional<(SideModeA == SideMode::kRight), ElementA_, ElementB_>::type;
  using LayoutBKernel = typename platform::conditional<(SideModeA == SideMode::kRight), LayoutA_, LayoutB_>::type;
  using ElementC = ElementC_;
  using LayoutC = LayoutC_;
  using TensorRefC = TensorRef<ElementC const, LayoutC>;
  using TensorRefD = TensorRef<ElementC, LayoutC>;
  using ElementAccumulator = ElementAccumulator_;
  using OperatorClass = OperatorClass_;
  using ArchTag = ArchTag_;
  using ThreadblockShape = ThreadblockShape_;
  using WarpShape = WarpShape_;
  using InstructionShape = InstructionShape_;
  using EpilogueOutputOp = EpilogueOutputOp_;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  using Operator = Operator_;
  static SideMode const kSideMode = SideModeA;
  static FillMode const kFillMode = FillModeA;
  static DiagType const kDiagType = DiagTypeA;
  static int const kStages = Stages;
  static int const kAlignmentA = AlignmentA;
  // Kernel-facing alignments follow the same swap rule as the kernel-facing operands.
  static int const kAlignmentAKernel = (SideModeA == SideMode::kRight) ? AlignmentB : AlignmentA;
  static int const kAlignmentB = AlignmentB;
  static int const kAlignmentBKernel = (SideModeA == SideMode::kRight) ? AlignmentA : AlignmentB;
  static int const kAlignmentC = EpilogueOutputOp::kCount;
  static bool const kSplitKSerial = SplitKSerial;
  // Complex Transform doesn't apply to B; when the operands are swapped for
  // RightSide mode, the transform follows the triangular matrix.
  static ComplexTransform const kTransformA = TransformA;
  static ComplexTransform const kTransformB = ComplexTransform::kNone;
  static ComplexTransform const kTransformAKernel = (SideModeA == SideMode::kRight) ?
                                                    ComplexTransform::kNone : TransformA;
  static ComplexTransform const kTransformBKernel = (SideModeA == SideMode::kRight) ?
                                                    TransformA : ComplexTransform::kNone;

  /// Define the kernel
  using TrmmKernel = typename kernel::DefaultTrmmUniversal<
    ElementAKernel,
    LayoutAKernel,
    kTransformAKernel,
    kAlignmentAKernel,
    ElementBKernel,
    LayoutBKernel,
    kTransformBKernel,
    kAlignmentBKernel,
    kSideMode,
    kFillMode,
    kDiagType,
    ElementC,
    LayoutC,
    ElementAccumulator,
    OperatorClass,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    kStages,
    kSplitKSerial,
    Operator
  >::TrmmKernel;

  using Arguments = typename TrmmKernel::Arguments;

 private:

  /// Kernel parameters object
  typename TrmmKernel::Params params_;

 public:

  /// Constructs the TRMM.
  Trmm() { }

  /// Determines whether the TRMM can execute the given problem.
  ///
  /// Rejects batched problems unless serial split-K is enabled, and rejects
  /// invalid side/fill/diag enumerants before deferring to the kernel's check.
  static Status can_implement(Arguments const &args) {

    if (!kSplitKSerial && args.batch_count > 1) {
      return Status::kErrorInvalidProblem;
    }

    Status status = TrmmKernel::can_implement(args);

    if (SideModeA == SideMode::kInvalid) {
      return Status::kErrorInvalidProblem;
    }

    if (FillModeA == FillMode::kInvalid) {
      return Status::kErrorInvalidProblem;
    }

    if (DiagTypeA == DiagType::kInvalid) {
      return Status::kErrorInvalidProblem;
    }

    if (status != Status::kSuccess) {
      return status;
    }

    return Status::kSuccess;
  }

  /// Gets the workspace size in bytes.
  ///
  /// Serial split-K needs one semaphore (an int) per output tile; otherwise
  /// no workspace is required.
  static size_t get_workspace_size(Arguments const &args) {

    size_t bytes = 0;

    // Determine grid shape
    ThreadblockSwizzle threadblock_swizzle;

    cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
      args.problem_size,
      {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
      args.batch_count);

    if (kSplitKSerial && args.batch_count > 1) {
      bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
    }

    return bytes;
  }

  /// Initializes TRMM state from arguments.
  ///
  /// For serial split-K, zeroes the semaphore workspace asynchronously on
  /// `stream`; callers must keep `workspace` valid until the kernel completes.
  Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {

    // Determine grid shape
    ThreadblockSwizzle threadblock_swizzle;

    cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
      args.problem_size,
      {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
      args.batch_count);

    if (kSplitKSerial) {
      if (args.batch_count > 1) {
        if (!workspace) {
          return Status::kErrorWorkspaceNull;
        }

        size_t bytes = get_workspace_size(args);

        cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);

        if (result != cudaSuccess) {
          return Status::kErrorInternal;
        }
      }
    }
    else {
      if (args.batch_count > 1) {
        return Status::kErrorInvalidProblem;
      }
    }

    int gemm_k_size = args.problem_size.k();

    // Swapping argument for A and B, if A was on the right side (problem size doesn't need to change here).
    if (kSideMode == SideMode::kRight) {
      // Initialize the Params structure
      params_ = typename TrmmKernel::Params{
        args.swapped_matrices(),
        grid_tiled_shape,
        gemm_k_size,
        static_cast<int *>(workspace)
      };

      return Status::kSuccess;
    }

    // Initialize the Params structure
    params_ = typename TrmmKernel::Params{
      args,
      grid_tiled_shape,
      gemm_k_size,
      static_cast<int *>(workspace)
    };

    return Status::kSuccess;
  }

  /// Lightweight update given a subset of arguments
  Status update(Arguments const &args, void *workspace = nullptr) {

    if (kSplitKSerial && args.batch_count > 1) {
      if (!workspace) {
        return Status::kErrorWorkspaceNull;
      }
    }

    size_t workspace_bytes = get_workspace_size(args);

    if (workspace_bytes && !workspace) {
      return Status::kErrorWorkspaceNull;
    }

    params_.update(args, workspace);

    return Status::kSuccess;
  }

  /// Runs the kernel using initialized state.
  Status run(cudaStream_t stream = nullptr) {

    ThreadblockSwizzle threadblock_swizzle;

    dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
    dim3 block(TrmmKernel::kThreadCount, 1, 1);

    int smem_size = int(sizeof(typename TrmmKernel::SharedStorage));

    // Kernels needing more than 48 KB of shared memory must opt in explicitly.
    if (smem_size >= (48 << 10)) {
      cudaError_t result = cudaFuncSetAttribute(Kernel<TrmmKernel>,
                                    cudaFuncAttributeMaxDynamicSharedMemorySize,
                                    smem_size);

      if (result != cudaSuccess) {
        return Status::kErrorInternal;
      }
    }

    cutlass::Kernel<TrmmKernel><<<grid, block, smem_size, stream>>>(params_);

    cudaError_t result = cudaGetLastError();

    return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
  }

  /// Runs the kernel using initialized state.
  Status operator()(cudaStream_t stream = nullptr) {
    return run(stream);
  }

  /// Initializes and runs the kernel in one call.
  Status operator()(
    Arguments const &args,
    void *workspace = nullptr,
    cudaStream_t stream = nullptr) {

    // Forward `stream` so that workspace initialization (cudaMemsetAsync inside
    // initialize()) is ordered on the caller's stream ahead of the kernel launch,
    // matching the behavior of the column-major partial specialization.
    Status status = initialize(args, workspace, stream);

    if (status == Status::kSuccess) {
      status = run(stream);
    }

    return status;
  }
};
/********************************************************************************************************
TRMM has 4 combinations based on Layouts {RowMajor, ColumnMajor} x Side mode {LeftSide, RightSide}
In templates and arguments to cutlass kernel, `matrix A` is always triangular, and `matrix B` is rectangular.
(adhering to the cuBLAS convention)
For the mainloop and trmm kernel, `A` and `B` points to left-side and right-side matrices, respectively.
Thus, for LeftSide mode `A` and `B` points to `matrix A` and `matrix B`, respectively. While for
the RightSide mode `A` and `B` points to `matrix B` and `matrix A`, respectively.
Additionally, CUTLASS GEMM epilogue is always RowMajor, and ColumnMajor output is achieved by
transposing the GEMM problem. Thus, ColumnMajor output layout for TRMM requires:
- Transposing `matrix A` and `matrix B` layouts
- Swapping problem size m and n values
- Swapping LeftSide and RightSide mode
RowMajor output: D = matrix A x matrix B
ColumnMajor output: D = matrix A x matrix B -> Transpose (D) = Transpose(matrix B) x Transpose(matrix A)
{RowMajor, ColumnMajor} x Side Mode {LeftSide, RightSide} 4 cases:
1. LeftSide mode and RowMajor output (default template)
2. LeftSide mode and ColumnMajor output
3. RightSide mode and RowMajor output
4. RightSide mode and ColumnMajor output
Mapping ColumnMajor output layout cases 2 and 4 to RowMajor efficient epilogue implementation:
Case 2 -> Case 3:
D_col = matrix A x matrix B (LeftSide mode)
=> Transpose(D_col) = Transpose(matrix B) x Transpose(matrix A) (RightSide mode)
      swap pointers for `A` and `B`, and call the GEMM mainloop with the RowMajor efficient epilogue
Case 4 -> Case 1:
D_col = matrix B x matrix A (RightSide mode)
=> Transpose(D_col) = Transpose(matrix A) x Transpose(matrix B) (LeftSide mode)
      call the GEMM mainloop with the RowMajor efficient epilogue
********************************************************************************************************/
/// Partial specialization for column-major output exchanges problem size and operand.
template <
  /// Element type for A matrix operand
  typename ElementA_,
  /// Layout type for A matrix operand
  typename LayoutA_,
  /// Side Mode for A
  SideMode SideModeA,
  /// Fill Mode for A
  FillMode FillModeA,
  /// DiagType for A
  DiagType DiagTypeA,
  /// Element type for B matrix operand
  typename ElementB_,
  /// Layout type for B matrix operand
  typename LayoutB_,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator_,
  /// Operator class tag
  typename OperatorClass_,
  /// Tag indicating architecture to tune for
  typename ArchTag_,
  /// Threadblock-level tile size (concept: GemmShape)
  typename ThreadblockShape_,
  /// Warp-level tile size (concept: GemmShape)
  typename WarpShape_,
  /// Instruction-level tile size (concept: GemmShape)
  typename InstructionShape_,
  /// Epilogue output operator
  typename EpilogueOutputOp_,
  /// Threadblock-level swizzling operator
  typename ThreadblockSwizzle_,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Access granularity of A matrix in units of elements
  int AlignmentA,
  /// Access granularity of B matrix in units of elements
  int AlignmentB,
  /// If true, kernel supports split-K as a serial reduction
  bool SplitKSerial,
  /// Operation performed by TRMM
  typename Operator_,
  /// Complex elementwise transformation on A operand
  ComplexTransform TransformA>
class Trmm<ElementA_, LayoutA_, SideModeA, FillModeA, DiagTypeA,
           ElementB_, LayoutB_, ElementC_,
           layout::ColumnMajor,  // partially specialized on LayoutC
           ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
           WarpShape_, InstructionShape_, EpilogueOutputOp_,
           ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, SplitKSerial,
           Operator_, TransformA> {
 public:

  using ElementA = ElementA_;
  using LayoutA = LayoutA_;
  using TensorRefA = TensorRef<ElementA const, LayoutA>;
  using ElementB = ElementB_;
  using LayoutB = LayoutB_;
  using TensorRefB = TensorRef<ElementB const, LayoutB>;
  using ElementC = ElementC_;
  using LayoutC = layout::ColumnMajor;
  using TensorRefC = TensorRef<ElementC const, LayoutC>;
  using TensorRefD = TensorRef<ElementC, LayoutC>;
  using ElementAccumulator = ElementAccumulator_;
  using OperatorClass = OperatorClass_;
  using ArchTag = ArchTag_;
  using ThreadblockShape = ThreadblockShape_;
  using WarpShape = WarpShape_;
  using InstructionShape = InstructionShape_;
  using EpilogueOutputOp = EpilogueOutputOp_;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  using Operator = Operator_;
  static SideMode const kSideMode = SideModeA;
  static FillMode const kFillMode = FillModeA;
  static DiagType const kDiagType = DiagTypeA;
  // Changing SideMode as we change the layout: a ColumnMajor output is realized
  // by transposing the whole problem, which exchanges left- and right-side
  // application of the triangular matrix.
  static SideMode const kSideModeT = (SideModeA == SideMode::kLeft) ?
                                      SideMode::kRight : SideMode::kLeft;
  // Changing FillMode as we change the layout: transposition turns a lower
  // triangle into an upper one and vice versa.
  static FillMode const kFillModeT = (FillModeA == FillMode::kLower) ?
                                      FillMode::kUpper : FillMode::kLower;
  static int const kStages = Stages;
  static int const kAlignmentA = AlignmentA;
  static int const kAlignmentB = AlignmentB;
  static ComplexTransform const kTransformA = TransformA;
  // Complex Transform doesn't apply to B
  static ComplexTransform const kTransformB = ComplexTransform::kNone;
  static bool const kSplitKSerial = SplitKSerial;

  /// The equivalent RowMajor-output operator this specialization delegates to;
  /// both operand layouts are transposed and side/fill modes are flipped.
  using UnderlyingOperator = Trmm<
    ElementA,
    typename layout::LayoutTranspose<LayoutA>::type,
    kSideModeT,
    kFillModeT,
    kDiagType,
    ElementB,
    typename layout::LayoutTranspose<LayoutB>::type,
    ElementC,
    layout::RowMajor,
    ElementAccumulator,
    OperatorClass,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    kStages,
    kAlignmentA,
    kAlignmentB,
    kSplitKSerial,
    Operator,
    TransformA
  >;

  using Arguments = typename UnderlyingOperator::Arguments;
  using TrmmKernel = typename UnderlyingOperator::TrmmKernel;
  static int const kAlignmentC = UnderlyingOperator::kAlignmentC;

 private:

  /// The RowMajor operator that performs all the work.
  UnderlyingOperator underlying_operator_;

 public:

  /// Constructs the TRMM.
  Trmm() { }

  /// Helper to construct a transposed equivalent for the underying TRMM operator which is identical
  // NOTE(review): relies on Arguments::transposed_problem_size() being provided by
  // TrmmKernel::Arguments — confirm against the kernel-level Arguments definition.
  static Arguments to_underlying_arguments(Arguments const &args) {
    return args.transposed_problem_size();
  }

  /// Determines whether the TRMM can execute the given problem.
  static Status can_implement(Arguments const &args) {

    return UnderlyingOperator::can_implement(to_underlying_arguments(args));
  }

  /// Gets the workspace size (delegates to the transposed operator).
  static size_t get_workspace_size(Arguments const &args) {

    return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
  }

  /// Initializes TRMM state from arguments.
  Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {

    return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
  }

  /// Lightweight update given a subset of arguments
  Status update(Arguments const &args, void *workspace = nullptr) {

    return underlying_operator_.update(to_underlying_arguments(args), workspace);
  }

  /// Runs the kernel using initialized state.
  Status run(cudaStream_t stream = nullptr) {

    return underlying_operator_.run(stream);
  }

  /// Runs the kernel using initialized state.
  Status operator()(cudaStream_t stream = nullptr) {
    return run(stream);
  }

  /// Initializes and runs the kernel in one call.
  Status operator()(
    Arguments const &args,
    void *workspace = nullptr,
    cudaStream_t stream = nullptr) {

    Status status = initialize(args, workspace, stream);

    if (status == Status::kSuccess) {
      status = run(stream);
    }

    return status;
  }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/device/trmm.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/device/trmm.h",
"repo_id": "cutlass",
"token_count": 9050
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Defines a Stream-K GEMM that can broadcast a bias vector in the epilogue.
Similar structure to DefaultGemmWithBroadcast, but uses its own epilogue
(DefaultStreamkEpilogueWithBroadcastTensorOp) and its own GEMM kernel
(GemmStreamkWithFusedEpilogue).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
  /// Element type for A matrix operand
  typename ElementA_,
  /// Layout type for A matrix operand
  typename LayoutA_,
  /// Complex elementwise transformation on A operand
  ComplexTransform TransformA,
  /// Access granularity of A matrix in units of elements
  int kAlignmentA,
  /// Element type for B matrix operand
  typename ElementB_,
  /// Layout type for B matrix operand
  typename LayoutB_,
  /// Complex elementwise transformation on B operand
  ComplexTransform TransformB,
  /// Access granularity of B matrix in units of elements
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Layout type for C and D matrix operands
  typename LayoutC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Operator class tag
  typename OperatorClass,
  /// Tag indicating architecture to tune for
  typename ArchTag,
  /// Threadblock-level tile size (concept: GemmShape)
  typename ThreadblockShape,
  /// Warp-level tile size (concept: GemmShape)
  typename WarpShape,
  /// Warp-level tile size (concept: GemmShape)
  typename InstructionShape,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Threadblock-level swizzling operator
  typename ThreadblockSwizzle,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Operation performed by GEMM
  typename Operator,
  ///
  typename Enable = void
>
struct DefaultGemmStreamkWithBroadcast {

  /// Baseline universal GEMM kernel whose mainloop (Mma) and epilogue
  /// configuration this type reuses.
  using GemmBase = typename DefaultGemmUniversal<
    ElementA_, LayoutA_, TransformA, kAlignmentA,
    ElementB_, LayoutB_, TransformB, kAlignmentB,
    ElementC_, LayoutC_, ElementAccumulator,
    OperatorClass,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    Stages,
    Operator
  >::GemmKernel;

  // Replace epilogue: swap the baseline epilogue for one that can broadcast a
  // bias vector, keeping the baseline's shape/warp-op/partition configuration.
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultStreamkEpilogueWithBroadcastTensorOp<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementC_,
    typename EpilogueOutputOp::ElementT,
    typename EpilogueOutputOp::ElementVector,
    EpilogueOutputOp,
    GemmBase::Epilogue::kElementsPerAccess
  >::Epilogue;

  // Compose the GEMM kernel: baseline mainloop + broadcast-capable epilogue
  // under the Stream-K kernel wrapper.
  using GemmKernel = GemmStreamkWithFusedEpilogue<
    typename GemmBase::Mma,
    Epilogue,
    ThreadblockSwizzle
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/kernel/default_gemm_streamk_with_broadcast.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/kernel/default_gemm_streamk_with_broadcast.h",
"repo_id": "cutlass",
"token_count": 1552
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
  typename Mma_,                  ///! Threadblock-scoped matrix multiply-accumulate
  typename Epilogue_,             ///! Epilogue
  typename ThreadblockSwizzle_    ///! Threadblock swizzling function
>
struct GemmPlanarComplexArray {
 public:

  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using EpilogueOutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;

  using ElementA = typename Mma::IteratorA::Element;
  using LayoutA = typename Mma::IteratorA::Layout;
  using ElementB = typename Mma::IteratorB::Element;
  using LayoutB = typename Mma::IteratorB::Layout;
  using ElementC = typename Epilogue::OutputTileIterator::Element;
  using LayoutC = typename Epilogue::OutputTileIterator::Layout;
  using Operator = typename Mma::Operator;
  using ArchTag = typename Mma::ArchTag;

  static ComplexTransform const kTransformA = Mma::kTransformA;
  static ComplexTransform const kTransformB = Mma::kTransformB;

  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  // One CUDA thread block per threadblock tile (32 threads per warp).
  static int const kThreadCount = 32 * WarpCount::kCount;

  /// Split-K preserves splits that are 128b aligned
  static int const kSplitKAlignment = const_max(
    128 / sizeof_bits<ElementA>::value,
    128 / sizeof_bits<ElementB>::value);

  //
  // Additional types needed for reflection
  //

  using ElementAccumulator = typename Mma::Policy::Operator::ElementC;
  using OperatorClass = typename Mma::Operator::OperatorClass;
  using ThreadblockShape = typename Mma::Shape;
  using WarpShape = typename Mma::Operator::Shape;
  using InstructionShape = typename Mma::Policy::Operator::Shape;

  static int const kStages = Mma::kStages;
  // Vectorized access widths (in elements) for each operand.
  static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
  static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
  static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
//
// Arguments structure
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue{};
int const *ptr_M{nullptr};
int const *ptr_N{nullptr};
int const *ptr_K{nullptr};
void const * const * ptr_A_real{nullptr};
void const * const * ptr_A_imag{nullptr};
void const * const * ptr_B_real{nullptr};
void const * const * ptr_B_imag{nullptr};
void const * const * ptr_C_real{nullptr};
void const * const * ptr_C_imag{nullptr};
void * const * ptr_D_real{nullptr};
void * const * ptr_D_imag{nullptr};
typename LayoutA::Stride::Index lda_real{};
typename LayoutA::Stride::Index lda_imag{};
typename LayoutB::Stride::Index ldb_real{};
typename LayoutB::Stride::Index ldb_imag{};
typename LayoutC::Stride::Index ldc_real{};
typename LayoutC::Stride::Index ldc_imag{};
typename LayoutC::Stride::Index ldd_real{};
typename LayoutC::Stride::Index ldd_imag{};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
int const *ptr_M,
int const *ptr_N,
int const *ptr_K,
void const * const * ptr_A_real,
void const * const * ptr_A_imag,
void const * const * ptr_B_real,
void const * const * ptr_B_imag,
void const * const * ptr_C_real,
void const * const * ptr_C_imag,
void * const * ptr_D_real,
void * const * ptr_D_imag,
typename LayoutA::Stride::Index lda_real,
typename LayoutA::Stride::Index lda_imag,
typename LayoutB::Stride::Index ldb_real,
typename LayoutB::Stride::Index ldb_imag,
typename LayoutC::Stride::Index ldc_real,
typename LayoutC::Stride::Index ldc_imag,
typename LayoutC::Stride::Index ldd_real,
typename LayoutC::Stride::Index ldd_imag)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_M(ptr_M),
ptr_N(ptr_N),
ptr_K(ptr_K),
ptr_A_real(ptr_A_real),
ptr_A_imag(ptr_A_imag),
ptr_B_real(ptr_B_real),
ptr_B_imag(ptr_B_imag),
ptr_C_real(ptr_C_real),
ptr_C_imag(ptr_C_imag),
ptr_D_real(ptr_D_real),
ptr_D_imag(ptr_D_imag),
lda_real(lda_real),
lda_imag(lda_imag),
ldb_real(ldb_real),
ldb_imag(ldb_imag),
ldc_real(ldc_real),
ldc_imag(ldc_imag),
ldd_real(ldd_real),
ldd_imag(ldd_imag)
{}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_M, args.ptr_N);
std::swap(args.ptr_A_real, args.ptr_B_real);
std::swap(args.ptr_A_imag, args.ptr_B_imag);
std::swap(args.lda_real, args.ldb_real);
std::swap(args.lda_imag, args.ldb_imag);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A_real{};
typename Mma::IteratorA::Params params_A_imag{};
typename Mma::IteratorB::Params params_B_real{};
typename Mma::IteratorB::Params params_B_imag{};
typename Epilogue::OutputTileIterator::Params params_C_real{};
typename Epilogue::OutputTileIterator::Params params_C_imag{};
typename Epilogue::OutputTileIterator::Params params_D_real{};
typename Epilogue::OutputTileIterator::Params params_D_imag{};
typename EpilogueOutputOp::Params output_op{};
int const *ptr_M{nullptr};
int const *ptr_N{nullptr};
int const *ptr_K{nullptr};
void const * const * ptr_A_real{nullptr};
void const * const * ptr_A_imag{nullptr};
void const * const * ptr_B_real{nullptr};
void const * const * ptr_B_imag{nullptr};
void const * const * ptr_C_real{nullptr};
void const * const * ptr_C_imag{nullptr};
void * const * ptr_D_real{nullptr};
void * const * ptr_D_imag{nullptr};
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
ptr_M(args.ptr_M),
ptr_N(args.ptr_N),
ptr_K(args.ptr_K),
params_A_real(args.lda_real),
params_A_imag(args.lda_imag),
params_B_real(args.ldb_real),
params_B_imag(args.ldb_imag),
params_C_real(args.ldc_real),
params_C_imag(args.ldc_imag),
params_D_real(args.ldd_real),
params_D_imag(args.ldd_imag),
output_op(args.epilogue),
ptr_A_real(args.ptr_A_real),
ptr_A_imag(args.ptr_A_imag),
ptr_B_real(args.ptr_B_real),
ptr_B_imag(args.ptr_B_imag),
ptr_C_real(args.ptr_C_real),
ptr_C_imag(args.ptr_C_imag),
ptr_D_real(args.ptr_D_real),
ptr_D_imag(args.ptr_D_imag)
{}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
ptr_M = args.ptr_M;
ptr_N = args.ptr_N;
ptr_K = args.ptr_K;
ptr_A_real = args.ptr_A_real;
ptr_A_imag = args.ptr_A_imag;
ptr_B_real = args.ptr_B_real;
ptr_B_imag = args.ptr_B_imag;
ptr_C_real = args.ptr_C_real;
ptr_C_imag = args.ptr_C_imag;
ptr_D_real = args.ptr_D_real;
ptr_D_imag = args.ptr_D_imag;
output_op = args.epilogue;
}
};
/// Shared memory storage structure
  /// Shared memory storage structure
  ///
  /// A union overlays the mainloop and epilogue storage in the same shared
  /// memory allocation (presumably the two phases never use it concurrently).
  union SharedStorage {
    typename Mma::SharedStorage main_loop;
    typename Epilogue::SharedStorage epilogue;
  };
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(Arguments const &args) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = args.problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = args.problem_size.m() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = args.problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = args.problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = args.problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = args.problem_size.m() % kAlignmentC;
}
if (isAMisaligned || isBMisaligned || isCMisaligned) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
public:
//
// Device-only API
//
  // Factory invocation
  /// Constructs a kernel instance on the stack and executes it; device-side
  /// entry point used by the kernel launch machinery.
  CUTLASS_DEVICE
  static void invoke(
    Params const &params,
    SharedStorage &shared_storage)
  {
    GemmPlanarComplexArray op;
    op(params, shared_storage);
  }
  /// Executes one GEMM
  ///
  /// Each CTA processes one batch (selected by the grid's K/Z coordinate) and
  /// persistently loops over all M/N tiles of that batch's problem, striding by
  /// the launched grid shape. Problem extents may be overridden per batch via
  /// device-resident arrays (ptr_M/ptr_N/ptr_K), so tile counts are computed at
  /// run time rather than taken from the launch configuration.
  CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {
    // Compute threadblock location
    ThreadblockSwizzle threadblock_swizzle;
    cutlass::gemm::GemmCoord threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Early exit if CTA is out of range
    if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
      params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
      return;
    }
    // The swizzled K coordinate selects which batch this CTA works on.
    int batch_idx = threadblock_tile_offset.k();
    // Start from the host-provided extents; may be overridden below.
    int problem_size_m = params.problem_size.m();
    int problem_size_n = params.problem_size.n();
    int problem_size_k = params.problem_size.k();
    // Resolve this batch's operand pointers (real and imaginary planes).
    ElementA *ptr_A_real = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_real[batch_idx]));
    ElementA *ptr_A_imag = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_imag[batch_idx]));
    ElementB *ptr_B_real = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_real[batch_idx]));
    ElementB *ptr_B_imag = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_imag[batch_idx]));
    //
    // If pointers for problem sizes are specified, these are loaded from global memory
    //
    if (params.ptr_M) {
      problem_size_m = params.ptr_M[batch_idx];
    }
    if (params.ptr_N) {
      problem_size_n = params.ptr_N[batch_idx];
    }
    if (params.ptr_K) {
      problem_size_k = params.ptr_K[batch_idx];
    }
    // Tile counts for this batch's (possibly runtime-discovered) extents,
    // rounded up to whole threadblock tiles.
    int const kBlockCountM = (problem_size_m + Mma::Shape::kM - 1) / Mma::Shape::kM;
    int const kBlockCountN = (problem_size_n + Mma::Shape::kN - 1) / Mma::Shape::kN;
    int const kGemmKIterations = (problem_size_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
    //
    // Each threadblock loops over the logical problem size which the kernel may have discovered
    // after the grid is launched.
    //
    CUTLASS_PRAGMA_NO_UNROLL
    for (int block_m = threadblock_tile_offset.m();
      block_m < kBlockCountM;
      block_m += params.grid_tiled_shape.m()) {
      CUTLASS_PRAGMA_NO_UNROLL
      for (int block_n = threadblock_tile_offset.n();
        block_n < kBlockCountN;
        block_n += params.grid_tiled_shape.n()) {
        //
        // Compute indices within threadblock and warp.
        //
        int thread_idx = threadIdx.x;
        // Broadcast the warp_id computed by lane 0 to ensure dependent code
        // is compiled as warp-uniform.
        int warp_idx = canonical_warp_idx_sync();
        int lane_idx = threadIdx.x % 32;
        //
        // Proceed with regular GEMM logic.
        //
        // Compute initial location in logical coordinates
        cutlass::MatrixCoord tb_offset_A{ block_m * Mma::Shape::kM, 0};
        cutlass::MatrixCoord tb_offset_B{ 0, block_n * Mma::Shape::kN };
        // Construct iterators to A and B operands
        typename Mma::IteratorA iterator_A_real(
          params.params_A_real,
          ptr_A_real,
          {problem_size_m, problem_size_k},
          thread_idx,
          tb_offset_A);
        typename Mma::IteratorA iterator_A_imag(
          params.params_A_imag,
          ptr_A_imag,
          {problem_size_m, problem_size_k},
          thread_idx,
          tb_offset_A);
        typename Mma::IteratorB iterator_B_real(
          params.params_B_real,
          ptr_B_real,
          {problem_size_k, problem_size_n},
          thread_idx,
          tb_offset_B);
        typename Mma::IteratorB iterator_B_imag(
          params.params_B_imag,
          ptr_B_imag,
          {problem_size_k, problem_size_n},
          thread_idx,
          tb_offset_B);
        //
        // Main loop
        //
        // Construct thread-scoped matrix multiply
        Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
        typename Mma::FragmentC accumulators;
        accumulators.clear();
        // Compute threadblock-scoped matrix multiply-add
        mma(
          kGemmKIterations,
          accumulators,
          iterator_A_real,
          iterator_A_imag,
          iterator_B_real,
          iterator_B_imag,
          accumulators);
        //
        // Epilogue
        //
        EpilogueOutputOp output_op(params.output_op);
        //
        // Masked tile iterators constructed from members
        //
        //assume identity swizzle
        MatrixCoord threadblock_offset(
          block_m * Mma::Shape::kM,
          block_n * Mma::Shape::kN
        );
        // Resolve this batch's C/D pointers (real and imaginary planes).
        ElementC *ptr_C_real = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_real[batch_idx]));
        ElementC *ptr_C_imag = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_imag[batch_idx]));
        ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real[batch_idx]);
        ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag[batch_idx]);
        // Tile iterator loading from source tensor.
        typename Epilogue::OutputTileIterator iterator_C_real(
          params.params_C_real,
          ptr_C_real,
          {problem_size_m, problem_size_n},
          thread_idx,
          threadblock_offset
        );
        typename Epilogue::OutputTileIterator iterator_C_imag(
          params.params_C_imag,
          ptr_C_imag,
          {problem_size_m, problem_size_n},
          thread_idx,
          threadblock_offset
        );
        // Tile iterator writing to destination tensor.
        typename Epilogue::OutputTileIterator iterator_D_real(
          params.params_D_real,
          ptr_D_real,
          {problem_size_m, problem_size_n},
          thread_idx,
          threadblock_offset
        );
        typename Epilogue::OutputTileIterator iterator_D_imag(
          params.params_D_imag,
          ptr_D_imag,
          {problem_size_m, problem_size_n},
          thread_idx,
          threadblock_offset
        );
        //
        // Construct epilogue
        //
        Epilogue epilogue(
          shared_storage.epilogue,
          thread_idx,
          warp_idx,
          lane_idx);
        // Execute the epilogue operator to update the destination tensor.
        epilogue(
          output_op,
          iterator_D_real,
          iterator_D_imag,
          accumulators,
          iterator_C_real,
          iterator_C_imag);
      } // for block_n
    } // for block_m
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/kernel/gemm_planar_complex_array.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/kernel/gemm_planar_complex_array.h",
"repo_id": "cutlass",
"token_count": 7703
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base functionality for common types of universal GEMM kernel parameters
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace util {

/// Returns true when the K extent is aligned for whichever operand stores K
/// contiguously: A when row-major, or B when column-major.
/// NOTE(review): the name's "continous" misspelling is preserved because it is
/// part of the public interface.
template <class LayoutA, class LayoutB>
CUTLASS_HOST_DEVICE
static bool
is_continous_k_aligned(GemmCoord problem_size, size_t alignmentA, size_t alignmentB) {
  // K is the contiguous dimension of A for row-major A, and of B for
  // column-major B.
  bool const k_contiguous_in_A = platform::is_same<LayoutA, layout::RowMajor>::value;
  bool const k_contiguous_in_B = platform::is_same<LayoutB, layout::ColumnMajor>::value;

  if (k_contiguous_in_A && (problem_size.k() % alignmentA) == 0) {
    return true;
  }

  return k_contiguous_in_B && (problem_size.k() % alignmentB) == 0;
}

} // namespace util
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Argument structure
///
/// Holds the subset of universal-GEMM arguments common to all kernel variants:
/// the operating mode, the logical problem extents, the batch (or split-K
/// slice) count, and the batch stride of the D operand.
struct UniversalArgumentsBase
{
  //
  // Data members
  //

  GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
  GemmCoord problem_size{};
  int batch_count{1};
  int64_t batch_stride_D{0};

  //
  // Methods
  //

  /// Default constructor
  UniversalArgumentsBase() = default;

  /// Constructs an arguments structure from explicit mode, extents, batch
  /// count, and D batch stride
  UniversalArgumentsBase(
    GemmUniversalMode mode,
    GemmCoord problem_size,
    int batch_count,
    int64_t batch_stride_D)
  {
    this->mode = mode;
    this->problem_size = problem_size;
    this->batch_count = batch_count;
    this->batch_stride_D = batch_stride_D;

    CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
  }
};
/// Parameters structure
///
/// Host-constructed kernel parameters shared by universal GEMM kernels:
/// derives the tiled grid shape and swizzle from the arguments, partitions the
/// K dimension across batches/splits, and manages the semaphore/workspace
/// needed by split-K modes.
template <
  typename ThreadblockSwizzle,
  typename ThreadblockShape,
  typename ElementA,
  typename ElementB,
  typename ElementC,
  typename LayoutA,
  typename LayoutB>
struct UniversalParamsBase
{
  //
  // Data members
  //
  GemmCoord problem_size{};
  // Grid extents in units of threadblock tiles (k() holds the split count)
  GemmCoord grid_tiled_shape{};
  int swizzle_log_tile{0};
  GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
  int batch_count {0};
  // Extent of the K dimension assigned to each threadblock/split
  int gemm_k_size {0};
  int64_t batch_stride_D {0};
  // Workspace pointer; used as semaphore storage for serial split-K
  int *semaphore = nullptr;
  //
  // Host dispatch API
  //
  /// Default constructor
  UniversalParamsBase() = default;
  /// Constructor
  UniversalParamsBase(
    UniversalArgumentsBase const &args,  /// GEMM application arguments
    int device_sms,  /// Number of SMs on the device
    int sm_occupancy)  /// Kernel SM occupancy (in thread blocks)
  :
    problem_size(args.problem_size),
    mode(args.mode),
    batch_count(args.batch_count),
    batch_stride_D(args.batch_stride_D),
    semaphore(nullptr)
  {
    init_grid_tiled_shape();
  }
  /// Returns the workspace size (in bytes) needed for this problem geometry
  size_t get_workspace_size() const
  {
    size_t workspace_bytes = 0;
    if (mode == GemmUniversalMode::kGemmSplitKParallel)
    {
      // Split-K parallel always requires a temporary workspace
      // (one partial-D buffer per K partition)
      workspace_bytes =
        sizeof(ElementC) *
        size_t(batch_stride_D) *
        size_t(grid_tiled_shape.k());
    }
    else if (mode == GemmUniversalMode::kGemm && grid_tiled_shape.k() > 1)
    {
      // Serial split-K only requires a temporary workspace if the number of partitions along the
      // GEMM K dimension is greater than one.
      // (one semaphore per output tile)
      workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
    }
    return workspace_bytes;
  }
  /// Assign and initialize the specified workspace buffer. Assumes
  /// the memory allocated to workspace is at least as large as get_workspace_size().
  Status init_workspace(
    void *workspace,
    cudaStream_t stream = nullptr)
  {
    semaphore = static_cast<int *>(workspace);
    // Zero-initialize entire workspace
    if (semaphore)
    {
      size_t workspace_bytes = get_workspace_size();
      CUTLASS_TRACE_HOST("  Initialize " << workspace_bytes << " workspace bytes");
      cudaError_t result = cudaMemsetAsync(
        semaphore,
        0,
        workspace_bytes,
        stream);
      if (result != cudaSuccess) {
        CUTLASS_TRACE_HOST("  cudaMemsetAsync() returned error " << cudaGetErrorString(result));
        return Status::kErrorInternal;
      }
    }
    return Status::kSuccess;
  }
  /// Returns the GEMM volume in thread block tiles
  GemmCoord get_tiled_shape() const
  {
    return grid_tiled_shape;
  }
  /// Returns the total number of thread blocks to launch
  int get_grid_blocks() const
  {
    dim3 grid_dims = get_grid_dims();
    return grid_dims.x * grid_dims.y * grid_dims.z;
  }
  /// Returns the grid extents in thread blocks to launch
  dim3 get_grid_dims() const
  {
    return ThreadblockSwizzle().get_grid_shape(grid_tiled_shape);
  }
private:
  /// Computes grid_tiled_shape, swizzle_log_tile, and the per-split K extent.
  CUTLASS_HOST_DEVICE
  void init_grid_tiled_shape() {
    // Get GEMM volume in thread block tiles
    grid_tiled_shape = ThreadblockSwizzle::get_tiled_shape(
      problem_size,
      {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
      batch_count);
    swizzle_log_tile = ThreadblockSwizzle::get_log_tile(grid_tiled_shape);
    // Determine extent of K-dimension assigned to each block
    gemm_k_size = problem_size.k();
    if (mode == GemmUniversalMode::kGemm || mode == GemmUniversalMode::kGemmSplitKParallel)
    {
      // Round each K partition up so that partition boundaries fall on
      // cacheline boundaries when K is the contiguous dimension of A or B.
      static const uint32_t CACHELINE_BYTES = 128;
      static const size_t element_bytes_a = sizeof(ElementA);
      static const size_t element_bytes_b = sizeof(ElementB);
      static const size_t cacheline_elements_a = CACHELINE_BYTES / element_bytes_a;
      static const size_t cacheline_elements_b = CACHELINE_BYTES / element_bytes_b;
      const bool cacheline_alignment_needed =
        util::is_continous_k_aligned<LayoutA, LayoutB>(problem_size, cacheline_elements_a, cacheline_elements_b);
      int const kAlignK = const_max(
        const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value),
        cacheline_alignment_needed ? const_max(cacheline_elements_a, cacheline_elements_b) : 1);
      gemm_k_size = round_up(ceil_div(problem_size.k(), batch_count), kAlignK);
      // Recompute the split count from the rounded-up partition size.
      if (gemm_k_size) {
        grid_tiled_shape.k() = ceil_div(problem_size.k(), gemm_k_size);
      }
    }
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/kernel/params_universal_base.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/kernel/params_universal_base.h",
"repo_id": "cutlass",
"token_count": 2874
} | 39 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/fast_math.h"
#include "cutlass/gemm_coord.hpp"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/gemm/kernel/tile_scheduler_params.h"
#include "cute/layout.hpp"
#include "cute/tensor.hpp"
#include "cute/arch/cluster_sm90.hpp"
namespace cutlass::gemm::kernel::detail {
///////////////////////////////////////////////////////////////////////////////
// Persistent Thread Block (TB) scheduler
//
// Tile scheduler for SM90 grouped GEMM: a persistent grid sweeps a linearized
// index space spanning the tiles of all groups. Each CTA maps its linear index
// back to (group, M tile, N tile), walking group boundaries forward as the
// index advances. Output tiles are never split along K.
template <class GroupProblemShape>
class PersistentTileSchedulerSm90Group {
  //
  // Data members
  //

private:
  // Linear tile index this CTA is currently assigned
  uint64_t current_work_linear_idx_ = 0;
  // Stride (in tiles) between successive assignments of one CTA
  uint64_t total_grid_size_ = 0;

  // Tracking current group, its starting linear idx and total tiles
  struct GroupInfo {
    int group_idx = 0;
    uint64_t start_linear_idx = 0;
    uint64_t total_tiles = 0;
  } current_group_info_;

public:
  // Describes one unit of work: an (M, N) tile of group L_idx.
  struct WorkTileInfo {
    int32_t M_idx = 0;
    int32_t N_idx = 0;
    int32_t L_idx = 0;
    bool is_valid_tile = false;

    CUTLASS_HOST_DEVICE
    bool
    is_valid() const {
      return is_valid_tile;
    }

    CUTLASS_HOST_DEVICE
    static WorkTileInfo
    invalid_work_tile() {
      return {-1, -1, -1, false};
    }

    // Tiles are never split along K, so every work unit is the final split.
    CUTLASS_HOST_DEVICE
    bool
    is_final_split(uint32_t k_tiles_per_output_tile) const {
      return true;
    }

    CUTLASS_HOST_DEVICE
    int32_t
    reduction_subtile_idx() const {
      return -1;
    }
  };

  using ProblemShape = typename GroupProblemShape::UnderlyingProblemShape;
  using Params = PersistentTileSchedulerSm90GroupParams<ProblemShape>;
  using RasterOrder = typename Params::RasterOrder;
  using RasterOrderOptions = typename Params::RasterOrderOptions;

  struct Arguments {
    int max_swizzle_size = 1;
    // Not applying Heuristics for Grouped problems, since largest dimension can change per group
    RasterOrderOptions raster_order = RasterOrderOptions::AlongM;
  };

  // Sink scheduler params as a member
  Params scheduler_params;

  //
  // Methods
  //

  /// Translates user-facing arguments into scheduler Params.
  template <class TileShape, class ClusterShape>
  static Params
  to_underlying_arguments(
    GroupProblemShape problem_shapes,
    TileShape tile_shape,
    ClusterShape cluster_shape,
    KernelHardwareInfo const& hw_info,
    Arguments const& arguments,
    [[maybe_unused]] void* workspace=nullptr,
    [[maybe_unused]] const uint32_t epilogue_subtile = 1) {

    // We only need the tile and cluster shape during scheduler setup, so let FTAD do the magic
    static_assert(cute::is_static<TileShape>::value);
    static_assert(cute::is_static<ClusterShape>::value);

    dim3 problem_blocks = get_tiled_cta_shape_mnl(
      problem_shapes.groups(),
      problem_shapes,
      hw_info,
      tile_shape, cluster_shape);

    Params params;
    params.initialize(
      problem_blocks,
      problem_shapes.groups(),
      problem_shapes.problem_shapes,
      problem_shapes.host_problem_shapes,
      to_gemm_coord(tile_shape),
      to_gemm_coord(cluster_shape),
      hw_info,
      arguments.max_swizzle_size,
      arguments.raster_order
    );
    return params;
  }

  // Given the inputs, computes the physical grid we should launch.
  template<class TileShape, class ClusterShape>
  CUTLASS_HOST_DEVICE static
  dim3
  get_grid_shape(
    GroupProblemShape problem_shapes,
    TileShape tile_shape,
    ClusterShape cluster_shape,
    KernelHardwareInfo hw_info,
    Arguments arguments,
    bool truncate_by_problem_size=true) {

    dim3 problem_blocks = get_tiled_cta_shape_mnl(
      problem_shapes.groups(),
      problem_shapes,
      hw_info,
      tile_shape, cluster_shape);

    // NOTE(review): the truncate_by_problem_size parameter is not forwarded;
    // a literal true is passed below. Confirm intent before changing.
    return Params::get_grid_shape(
      problem_blocks,
      to_gemm_coord(cluster_shape),
      hw_info,
      arguments.max_swizzle_size,
      arguments.raster_order,
      /* truncate_by_problem_size = */true
    );
  }

  // Given the inputs, computes the total number of output blocks this problem will compute over
  // Note that this is only the logical size of our grid, not the physical grid we will actually launch.
  template<class BlockShape, class ClusterShape>
  CUTLASS_HOST_DEVICE static
  dim3
  get_tiled_cta_shape_mnl(int groups, GroupProblemShape problem_shapes, KernelHardwareInfo hw_info, BlockShape cta_shape, ClusterShape cluster_shape) {
    uint32_t total_ctas = 0;
    uint32_t cta_in_N_dim = 1; // We linearize the blocks across all the problems here

    // If host problem shapes are not provided.
    if (!problem_shapes.is_host_problem_shape_available()) {
      // Fall back to one CTA per SM.
      total_ctas = hw_info.sm_count;
    }
    // If host problem shapes are provided, make a better decision about possibility to launch smaller grid.
    else {
      // Sum cluster-aligned tile counts over all groups.
      for (int group = 0; group < groups; group++) {
        auto ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes.get_host_problem_shape(group)), cute::shape<0>(cta_shape)));
        auto ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes.get_host_problem_shape(group)), cute::shape<1>(cta_shape)));
        auto problem_blocks_m = round_up(ctas_along_m, cute::get<0>(cluster_shape));
        auto problem_blocks_n = round_up(ctas_along_n, cute::get<1>(cluster_shape));
        total_ctas += problem_blocks_m * problem_blocks_n;
      }
    }

    return Params::get_tiled_cta_shape_mnl(
      to_gemm_coord(cluster_shape),
      total_ctas, cta_in_N_dim
    );
  }

  CUTLASS_HOST_DEVICE
  static bool
  can_implement(Arguments const& args) {
    return true;
  }

  PersistentTileSchedulerSm90Group() = default;

  /// Device-side constructor: derives this CTA's starting linear index from
  /// its block coordinates (per raster order) and precomputes the first
  /// group's cluster-aligned tile count.
  CUTLASS_DEVICE explicit PersistentTileSchedulerSm90Group(Params const& params_) : scheduler_params(params_) {
    // MSVC requires protecting use of CUDA-specific nonstandard syntax,
    // like blockIdx and gridDim, with __CUDA_ARCH__.
#if defined(__CUDA_ARCH__)
    if (scheduler_params.raster_order_ == RasterOrder::AlongN) {
      current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x);
    }
    else {
      current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y);
    }

    total_grid_size_ = uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z);

    // Tile counts for group 0; static (tuple) shapes use cute::ceil_div, while
    // dynamic integer shapes use the precomputed fast-divmod.
    uint64_t ctas_along_m, ctas_along_n;
    if (is_tuple<decltype(cute::shape<0>(params_.problem_shapes_[0]))>::value ||
      is_tuple<decltype(cute::shape<1>(params_.problem_shapes_[0]))>::value) {
      ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(params_.problem_shapes_[0]), scheduler_params.cta_shape_.m()));
      ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(params_.problem_shapes_[0]), scheduler_params.cta_shape_.n()));
    }
    else {
      ctas_along_m = scheduler_params.divmod_cta_shape_m_.divide(cute::shape<0>(params_.problem_shapes_[0]) + scheduler_params.divmod_cta_shape_m_.divisor - 1);
      ctas_along_n = scheduler_params.divmod_cta_shape_n_.divide(cute::shape<1>(params_.problem_shapes_[0]) + scheduler_params.divmod_cta_shape_n_.divisor - 1);
    }
    auto problem_blocks_m = round_up(ctas_along_m, (1 << params_.log_swizzle_size_) * params_.cluster_shape_.m());
    auto problem_blocks_n = round_up(ctas_along_n, (1 << params_.log_swizzle_size_) * params_.cluster_shape_.n());
    current_group_info_.total_tiles = problem_blocks_m * problem_blocks_n;
#else
    CUTLASS_ASSERT(false && "This line should never be reached");
#endif
  }

  CUTLASS_DEVICE
  WorkTileInfo
  get_current_work() {
    return get_current_work_for_linear_idx(current_work_linear_idx_);
  }

  /// Maps a linear tile index to a WorkTileInfo, advancing the cached group
  /// tracker as needed. Returns an invalid tile when the index is exhausted.
  CUTLASS_DEVICE
  WorkTileInfo
  get_current_work_for_linear_idx(uint64_t linear_idx) {
    if (scheduler_params.pre_processed_problem_shapes && linear_idx >= scheduler_params.blocks_across_problem_) {
      return WorkTileInfo::invalid_work_tile();
    }

    return get_work_idx_m_and_n(linear_idx,
                                current_group_info_,
                                scheduler_params.groups_,
                                scheduler_params.problem_shapes_,
                                scheduler_params.cta_shape_,
                                scheduler_params.cluster_shape_,
                                scheduler_params.divmod_cluster_shape_major_,
                                scheduler_params.divmod_cluster_shape_minor_,
                                scheduler_params.divmod_cta_shape_m_,
                                scheduler_params.divmod_cta_shape_n_,
                                scheduler_params.log_swizzle_size_,
                                scheduler_params.raster_order_);
  }

  /// Advances this CTA's linear index by one full grid's worth of tiles
  /// (persistent-kernel stride).
  CUTLASS_DEVICE
  void
  advance_to_next_work(uint32_t advance_count = 1) {
    current_work_linear_idx_ += total_grid_size_ * uint64_t(advance_count);
  }

  // get work_idx_m, work_idx_n from linear_idx while applying swizzle
  //
  // Walks groups forward from group_info (indices are consumed monotonically,
  // so the cached group tracker never needs to move backward), then decomposes
  // the within-group offset into cluster-major/minor coordinates, undoes the
  // swizzle, and adds this CTA's position within its cluster.
  static CUTLASS_DEVICE
  WorkTileInfo
  get_work_idx_m_and_n(
      uint64_t linear_idx,
      struct GroupInfo& group_info,
      int32_t total_problem_groups,
      ProblemShape* problem_shapes,
      GemmCoord cta_shape,
      GemmCoord cluster_shape,
      FastDivmodU64Pow2 const& divmod_cluster_shape_major,
      FastDivmodU64Pow2 const& divmod_cluster_shape_minor,
      FastDivmodU64 const& divmod_cta_shape_m,
      FastDivmodU64 const& divmod_cta_shape_n,
      int32_t log_swizzle_size,
      RasterOrder raster_order) {

    bool valid_tile = true;

    // Cluster-aligned tile counts for the current group.
    uint64_t ctas_along_m, ctas_along_n;
    if (is_tuple<decltype(cute::shape<0>(problem_shapes[group_info.group_idx]))>::value ||
      is_tuple<decltype(cute::shape<1>(problem_shapes[group_info.group_idx]))>::value) {
      ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes[group_info.group_idx]), cta_shape.m()));
      ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes[group_info.group_idx]), cta_shape.n()));
    }
    else {
      ctas_along_m = divmod_cta_shape_m.divide(cute::shape<0>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_m.divisor - 1);
      ctas_along_n = divmod_cta_shape_n.divide(cute::shape<1>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_n.divisor - 1);
    }
    auto problem_blocks_m = round_up(ctas_along_m, (1 << log_swizzle_size) * cluster_shape.m());
    auto problem_blocks_n = round_up(ctas_along_n, (1 << log_swizzle_size) * cluster_shape.n());
    group_info.total_tiles = problem_blocks_m * problem_blocks_n;

    // Advance to the group that owns linear_idx.
    while (group_info.start_linear_idx + group_info.total_tiles <= linear_idx) {
      group_info.group_idx++;

      if (group_info.group_idx >= total_problem_groups)
        return WorkTileInfo::invalid_work_tile();

      group_info.start_linear_idx += group_info.total_tiles;
      if (is_tuple<decltype(cute::shape<0>(problem_shapes[group_info.group_idx]))>::value ||
        is_tuple<decltype(cute::shape<1>(problem_shapes[group_info.group_idx]))>::value) {
        ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes[group_info.group_idx]), cta_shape.m()));
        ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes[group_info.group_idx]), cta_shape.n()));
      }
      else {
        ctas_along_m = divmod_cta_shape_m.divide(cute::shape<0>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_m.divisor - 1);
        ctas_along_n = divmod_cta_shape_n.divide(cute::shape<1>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_n.divisor - 1);
      }
      problem_blocks_m = round_up(ctas_along_m, (1 << log_swizzle_size) * cluster_shape.m());
      problem_blocks_n = round_up(ctas_along_n, (1 << log_swizzle_size) * cluster_shape.n());
      group_info.total_tiles = problem_blocks_m * problem_blocks_n;
    }

    // Decompose the within-group offset into cluster id and intra-cluster
    // offsets along the major raster dimension.
    uint64_t cluster_id, cluster_major_offset = 0, cluster_minor_offset = 0;
    uint64_t blk_per_grid_dim = divmod_cluster_shape_minor.divide(linear_idx - group_info.start_linear_idx);
    divmod_cluster_shape_major(cluster_id, cluster_major_offset, blk_per_grid_dim);

    auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster();
    if (raster_order == RasterOrder::AlongN) {
      cluster_minor_offset = cta_m_in_cluster;
    }
    else {
      cluster_minor_offset = cta_n_in_cluster;
    }

    // Undo the swizzle: low log_swizzle_size bits of cluster_id index within
    // the swizzle tile along the minor dimension.
    uint64_t cluster_idx_minor, cluster_idx_major;

    uint64_t cluster_idx_minor_div_swizzle, extra, offset;

    offset = cluster_id & ((1 << log_swizzle_size) - 1);
    extra = cluster_id >> log_swizzle_size;

    uint64_t curr_group_cluster_blk_major;
    if (raster_order == RasterOrder::AlongN) {
      curr_group_cluster_blk_major = divmod_cluster_shape_major.divide(problem_blocks_n);
    }
    else {
      curr_group_cluster_blk_major = divmod_cluster_shape_major.divide(problem_blocks_m);
    }
    cluster_idx_minor_div_swizzle = extra / curr_group_cluster_blk_major;
    cluster_idx_major = extra % curr_group_cluster_blk_major;

    cluster_idx_minor = cluster_idx_minor_div_swizzle * (1 << log_swizzle_size) + offset;

    // Convert cluster coordinates back to CTA coordinates.
    auto minor_work_idx = static_cast<int32_t>(cluster_idx_minor * divmod_cluster_shape_minor.divisor +
                                               cluster_minor_offset);
    auto major_work_idx = static_cast<int32_t>(cluster_idx_major * divmod_cluster_shape_major.divisor +
                                               cluster_major_offset);

    if (raster_order == RasterOrder::AlongN) {
      return {minor_work_idx, major_work_idx, group_info.group_idx, valid_tile};
    }
    else {
      return {major_work_idx, minor_work_idx, group_info.group_idx, valid_tile};
    }
  }

  // Returns whether the block assigned this work should compute the epilogue for the corresponding
  // output tile. For the basic tile scheduler, this is always true.
  CUTLASS_HOST_DEVICE
  static bool
  compute_epilogue(WorkTileInfo const&, Params const&) {
    return true;
  }

  // Performs the reduction across splits for a given output tile. Since this scheduler does
  // not split output tiles, no reduction is needed.
  template <class FrgTensorC>
  CUTLASS_DEVICE
  static void
  fixup(Params const&, WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) {}

  // Returns whether the current WorkTileInfo passed in should continue to be used. Since
  // this scheduler only schedules work in units of single, full output tiles, the WorkTileInfo
  // passed in should not be used after having been processed.
  CUTLASS_DEVICE
  static bool
  continue_current_work(WorkTileInfo&) {
    return false;
  }

  // The basic tile scheduler does not require any additional workspace
  template <class ProblemShape, class ElementAccumulator>
  static size_t
  get_workspace_size(Arguments const&, ProblemShape, KernelHardwareInfo const&, uint32_t, const uint32_t = 1) {
    return 0;
  }

  template <class ProblemShape, class ElementAccumulator>
  static cutlass::Status
  initialize_workspace(Arguments const&, void*, cudaStream_t, ProblemShape, KernelHardwareInfo const&,
    uint32_t, const uint32_t = 1) {
    return Status::kSuccess;
  }

  /// Number of K tiles a work unit covers (always the full K extent here).
  template <class ProblemShape_MNKL, class TileShape>
  CUTLASS_HOST_DEVICE
  static int
  get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape_MNKL problem_shape, TileShape tile_shape) {
    // All work units returned by this scheduler cover the entire K iteration
    // space of the output tile assigned to the work unit.
    return cute::size(cute::ceil_div(cute::get<2>(problem_shape), cute::get<2>(tile_shape)));
  }

  CUTLASS_HOST_DEVICE
  static uint32_t
  get_work_k_tile_start(WorkTileInfo const&) {
    // All work units returned by this scheduler start from K tile 0
    return 0u;
  }

  // Separate reduction is never used by this scheduler.
  CUTLASS_DEVICE
  static bool
  need_separate_reduction(Params const& params) {
    return false;
  }

  CUTLASS_DEVICE
  bool
  is_work_tile_for_reduction(WorkTileInfo const& work_tile_info, Params const& params) {
    return false;
  }

  // NOTE(review): name misspells "epilogue"; preserved for API compatibility.
  CUTLASS_DEVICE
  uint32_t
  epilgoue_subtile_idx(WorkTileInfo const& work_tile_info, Params const& params) const {
    return 0;
  }

  template <class FrgTensorC>
  CUTLASS_DEVICE
  void
  separate_reduction(
    Params const& params,
    WorkTileInfo const& work_tile_info,
    FrgTensorC& accumulators,
    uint32_t num_barriers,
    uint32_t barrier_idx) {
  }

  // Shares the accumulator set with peers in the global workspace
  // (no-op: this scheduler never splits tiles, so there are no peers).
  template <class FrgTensorC>
  CUTLASS_DEVICE
  static void
  share(
    Params const& params,
    WorkTileInfo const& work_tile_info,
    FrgTensorC& accumulators,
    uint32_t num_barriers,
    uint32_t barrier_idx) {
  }

  CUTLASS_DEVICE
  static bool
  valid_warpgroup_in_work_tile(WorkTileInfo const& work_tile_info) {
    return true;
  }

  CUTLASS_DEVICE
  static bool
  requires_separate_reduction(Params const& params) {
    return false;
  }
};
} // namespace cutlass::gemm::kernel::detail
| cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp/0 | {
"file_path": "cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp",
"repo_id": "cutlass",
"token_count": 7408
} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/permute.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/gemm/threadblock/default_mma_core_wmma.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// DefaultMma composes the threadblock-scoped GEMM mainloop: each partial
/// specialization below selects the shared-memory core (MmaCore), the
/// global-memory tile iterators for the A and B operands, and a pipelined
/// (2-stage) or multistage threadblock-scoped MMA, keyed on operator class,
/// stage count, and output layout.
template <
    /// Element type for A matrix operand
    typename ElementA_,
    /// Layout type for A matrix operand
    typename LayoutA_,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB_,
    /// Layout type for B matrix operand
    typename LayoutB_,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator_,
    /// Layout type for C and D matrix operands
    typename LayoutC_,
    /// Operator class tag
    typename OperatorClass_,
    /// Tag indicating architecture to tune for
    typename ArchTag_,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape_,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape_,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape_,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// Operation performed by GEMM
    typename Operator,
    /// Store the accumulators in row major or column major.  Row major is used
    /// when output layout is interleaved.
    bool AccumulatorsInRowMajor = false,
    /// Use zfill or predicate for out-of-bound cp.async
    SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
    /// Gather operand A by using an index array
    bool GatherA = false,
    /// Gather operand B by using an index array
    bool GatherB = false,
    /// Permute operand A
    typename PermuteALayout = layout::NoPermute,
    /// Permute operand B
    typename PermuteBLayout = layout::NoPermute
    >
struct DefaultMma;
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass Simt)
///
/// Two-stage (double-buffered) SIMT mainloop: A/B tiles are staged through
/// shared memory via PredicatedTileIterator and consumed by MmaPipelined.
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Layout type for C and D matrix operand
    typename LayoutC,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Operation performed by GEMM
    typename Operator,
    /// Gather operand A by using an index array
    bool GatherA,
    /// Gather operand B by using an index array
    bool GatherB,
    /// Permute operand A
    typename PermuteALayout,
    /// Permute operand B
    typename PermuteBLayout
    >
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
                  kAlignmentB, ElementAccumulator, LayoutC,
                  arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
                  InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
                  GatherA, GatherB, PermuteALayout, PermuteBLayout> {

  static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
             || platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
             "simt epilogue must be row major");

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator, LayoutC,
      arch::OpClassSimt, 2, Operator>;

  // Define iterators over tiles from the A operand
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
          ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
          GatherA, PermuteALayout>;

  // Define iterators over tiles from the B operand
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
          ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
          GatherB, PermuteBLayout>;

  // Define the threadblock-scoped pipelined matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
      LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
///
/// Two-stage (double-buffered) tensor-op mainloop; LayoutC is pinned to
/// layout::RowMajor by the specialization pattern itself.
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Operation performed by GEMM
    typename Operator,
    /// Use zfill or predicate for out-of-bound cp.async
    SharedMemoryClearOption SharedMemoryClear,
    /// Gather operand A by using an index array
    bool GatherA,
    /// Gather operand B by using an index array
    bool GatherB,
    /// Permute operand A
    typename PermuteALayout,
    /// Permute operand B
    typename PermuteBLayout
    >
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
                  kAlignmentB, ElementAccumulator, layout::RowMajor,
                  arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
                  InstructionShape, 2, Operator, false, SharedMemoryClear,
                  GatherA, GatherB, PermuteALayout, PermuteBLayout> {

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
      arch::OpClassTensorOp, 2, Operator>;

  // Define iterators over tiles from the A operand
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
          ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
          GatherA, PermuteALayout>;

  // Define iterators over tiles from the B operand
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
          ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
          GatherB, PermuteBLayout>;

  // Define the threadblock-scoped pipelined matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
      layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
///
/// f32 x f32 tensor-op GEMM. Note that the deduced Operator template
/// parameter is not forwarded to the MmaCore: this specialization pins
/// arch::OpMultiplyAddFastF16, computing f32 inputs via the faster f16
/// tensor-core path.
template <
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Operation performed by GEMM
    typename Operator,
    /// Gather operand A by using an index array
    bool GatherA,
    /// Gather operand B by using an index array
    bool GatherB,
    /// Permute operand A
    typename PermuteALayout,
    /// Permute operand B
    typename PermuteBLayout
    >
struct DefaultMma<float, LayoutA, kAlignmentA, float, LayoutB,
                  kAlignmentB, float, layout::RowMajor,
                  arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
                  InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
                  GatherA, GatherB, PermuteALayout, PermuteBLayout> {

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, float, LayoutA, float,
      LayoutB, float, layout::RowMajor, arch::OpClassTensorOp, 2,
      arch::OpMultiplyAddFastF16>;

  // Define iterators over tiles from the A operand
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
          float, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
          GatherA, PermuteALayout>;

  // Define iterators over tiles from the B operand
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
          float, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
          GatherB, PermuteBLayout>;

  // Define the threadblock-scoped pipelined matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      IteratorB, typename MmaCore::SmemIteratorB, float,
      layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
///
/// Two-stage mainloop for interleaved epilogues (AccumulatorsInRowMajor ==
/// true in the specialization pattern). Operand alignment must exactly match
/// one 128-bit access; enforced by the static_asserts below.
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Operator class tag
    typename OperatorClass,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Operation performed by GEMM
    typename Operator,
    /// Number of Interleaved K
    int InterleavedK>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
                  kAlignmentB, ElementAccumulator,
                  layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
                  ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2,
                  Operator, true, SharedMemoryClearOption::kNone, false, false,
                  layout::NoPermute, layout::NoPermute> {

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator,
      layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
      true>;

  static_assert(kAlignmentA == 128 / sizeof_bits<ElementA>::value,
                "Alignment must match thread data map's vector length");

  static_assert(kAlignmentB ==128 / sizeof_bits<ElementB>::value,
                "Alignment must match thread data map's vector length");

  // Define iterators over tiles from the A operand
  using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator<
      cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA,
      LayoutA, 1, typename MmaCore::IteratorThreadMapA>;

  // Define iterators over tiles from the B operand
  using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator<
      cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB,
      LayoutB, 0, typename MmaCore::IteratorThreadMapB>;

  // Define the threadblock-scoped pipelined matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
      layout::ColumnMajorInterleaved<InterleavedK>,
      typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output
///
/// Multistage (cp.async-based) SIMT mainloop: PredicatedTileAccessIterator
/// feeds MmaMultistage with `Stages` stages of shared-memory buffering.
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Layout type for C and D matrix operand
    typename LayoutC,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Number of stages used in the multistage mainloop
    int Stages,
    /// Operation performed by GEMM
    typename Operator,
    /// Gather operand A by using an index array
    bool GatherA,
    /// Gather operand B by using an index array
    bool GatherB,
    /// Permute operand A
    typename PermuteALayout,
    /// Permute operand B
    typename PermuteBLayout
    >
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
                  kAlignmentB, ElementAccumulator, LayoutC,
                  arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
                  InstructionShape, Stages, Operator, false, SharedMemoryClearOption::kNone,
                  GatherA, GatherB, PermuteALayout, PermuteBLayout> {

  static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
             || platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
             "simt epilogue must be row major");

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassSimt,
      Stages, Operator>;

  // Define iterators over tiles from the A operand
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileAccessIterator<
          cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
          ElementA, LayoutA, 1, ThreadMapA, AccessTypeA, GatherA, PermuteALayout>;

  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileAccessIterator<
          cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
          ElementB, LayoutB, 0, ThreadMapB, AccessTypeB, GatherB, PermuteBLayout>;

  // Define the threadblock-scoped multistage matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
      MmaCore::kCacheOpB, ElementAccumulator, LayoutC,
      typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
///
/// Multistage (cp.async-based) tensor-op mainloop: PredicatedTileAccessIterator
/// feeds MmaMultistage with `Stages` stages of shared-memory buffering.
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Layout type for C and D matrix operand
    typename LayoutC,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Number of stages used in the multistage mainloop
    int Stages,
    /// Operation performed by GEMM
    typename Operator,
    /// Use zfill or predicate for out-of-bound cp.async
    SharedMemoryClearOption SharedMemoryClear,
    /// Gather operand A by using an index array
    bool GatherA,
    /// Gather operand B by using an index array
    bool GatherB,
    /// Permute operand A
    typename PermuteALayout,
    /// Permute operand B
    typename PermuteBLayout
    >
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
                  kAlignmentB, ElementAccumulator, LayoutC,
                  arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
                  InstructionShape, Stages, Operator, false, SharedMemoryClear,
                  GatherA, GatherB, PermuteALayout, PermuteBLayout> {

  // Fixed: the diagnostic previously read "simt epilogue must be row major",
  // a copy-paste from the SIMT specialization; this is the tensor-op path.
  static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
             || platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
             "tensor op epilogue must be row major");

  // Use global-level cp.async caching when each access is a full 128-bit
  // transaction; otherwise fall back to the default (cache-all) policy.
  // These are forwarded into DefaultMmaCore and re-exported as kCacheOpA/B.
  static cutlass::arch::CacheOperation::Kind const CacheOpA =
      ((sizeof_bits<ElementA>::value * kAlignmentA) == 128)
          ? cutlass::arch::CacheOperation::Global
          : cutlass::arch::CacheOperation::Always;

  static cutlass::arch::CacheOperation::Kind const CacheOpB =
      ((sizeof_bits<ElementB>::value * kAlignmentB) == 128)
          ? cutlass::arch::CacheOperation::Global
          : cutlass::arch::CacheOperation::Always;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassTensorOp,
      Stages, Operator, false, CacheOpA, CacheOpB>;

  // Define iterators over tiles from the A operand
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileAccessIterator<
          cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
          ElementA, LayoutA, 1, ThreadMapA, AccessTypeA, GatherA, PermuteALayout>;

  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileAccessIterator<
          cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
          ElementB, LayoutB, 0, ThreadMapB, AccessTypeB, GatherB, PermuteBLayout>;

  // Define the threadblock-scoped multistage matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
      MmaCore::kCacheOpB, ElementAccumulator, LayoutC,
      typename MmaCore::MmaPolicy, Stages, SharedMemoryClear>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
///
/// Multistage mainloop for interleaved epilogues (AccumulatorsInRowMajor ==
/// true in the specialization pattern); per the primary template's parameter
/// documentation, accumulators are therefore handed to MmaMultistage with a
/// row-major layout below.
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Operator class tag
    typename OperatorClass,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Number of stages used in the multistage mainloop
    int Stages,
    /// Operation performed by GEMM
    typename Operator,
    /// Number of Interleaved K
    int InterleavedK>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
                  kAlignmentB, ElementAccumulator,
                  layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
                  ArchTag, ThreadblockShape, WarpShape, InstructionShape,
                  Stages, Operator, true, SharedMemoryClearOption::kNone,
                  false, false, layout::NoPermute, layout::NoPermute> {

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator,
      layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
      Operator, true>;

  // Define iterators over tiles from the A operand
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileAccessIterator<
          cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
          ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;

  // Define iterators over tiles from the B operand
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileAccessIterator<
          cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
          ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;

  // Define the threadblock-scoped multistage matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
      MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
      typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for SIMT IDP4A Kernels
///
/// int8 x int8 SIMT GEMM built on the 4-element dot product instruction
/// (InstructionShape fixed to GemmShape<1, 1, 4>). The 2d-thread-tile
/// iterators transpose an operand when its layout requires it, as selected
/// by transposeA / transposeB below.
template <
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Operation performed by GEMM
    typename Operator,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape>
struct DefaultMma<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
                  ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
                  ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>, 2,
                  Operator, false, SharedMemoryClearOption::kNone,
                  false, false, layout::NoPermute, layout::NoPermute> {

  using InstructionShape = GemmShape<1, 1, 4>;
  using ElementA = int8_t;
  using ElementB = int8_t;
  using OperatorClass = arch::OpClassSimt;

  // Transpose an operand in registers when its layout is not the natural one
  // for the 2d-thread-tile iterator.
  static const bool transposeA = platform::is_same< LayoutA, layout::ColumnMajor >::value;
  static const bool transposeB = platform::is_same< LayoutB, layout::RowMajor >::value;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
      OperatorClass, 2, Operator>;

  // Define iterators over tiles from the A operand
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
          cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
          ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA>;

  // Define iterators over tiles from the B operand
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
          cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
          ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB>;

  // Define the threadblock-scoped pipelined matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
      layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
/// Specialization for Wmma TensorOp operator with 2 staged pipeline
///
/// Double-buffered mainloop built on the nvcuda::wmma path (MmaPipelined).
template <
    ///< Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Layout type for C and D matrix operands
    typename LayoutC,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Operation performed by GEMM
    typename Operator>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
                  kAlignmentB, ElementAccumulator, LayoutC,
                  arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
                  InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
                  false, false, layout::NoPermute, layout::NoPermute> {
  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator, LayoutC,
      arch::OpClassWmmaTensorOp, 2, Operator>;

  // Define iterators over tiles from the A operand
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
          ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;

  // Define iterators over tiles from the B operand
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
          ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;

  // Define the threadblock-scoped pipelined matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
      LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for Wmma TensorOp operator with 1 staged pipeline
///
/// Single-staged mainloop on the nvcuda::wmma path (MmaSingleStage): no
/// double buffering of shared-memory tiles.
template <
    ///< Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Layout type for C and D matrix operands
    typename LayoutC,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Operation performed by GEMM
    typename Operator>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
                  kAlignmentB, ElementAccumulator, LayoutC,
                  arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
                  InstructionShape, 1, Operator, false, SharedMemoryClearOption::kNone,
                  false, false, layout::NoPermute, layout::NoPermute> {
  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementAccumulator, LayoutC,
      arch::OpClassWmmaTensorOp, 1, Operator>;

  // Define iterators over tiles from the A operand
  using IteratorA =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
          ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;

  // Define iterators over tiles from the B operand
  using IteratorB =
      cutlass::transform::threadblock::PredicatedTileIterator<
          cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
          ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;

  // Define the threadblock-scoped singlestage matrix multiply
  using ThreadblockMma = cutlass::gemm::threadblock::MmaSingleStage<
      typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
      IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
      LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/threadblock/default_mma.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/threadblock/default_mma.h",
"repo_id": "cutlass",
"token_count": 12168
} | 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp
instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Template defining default matrix multiply operators inferred from
/// threadblock tile size, global memory data layout, and target math
/// instruction.
///
/// This is the primary template for the multistage complex-valued MmaCore; it
/// is only declared here. Partial specializations (in separate headers) supply
/// the shared memory layouts, thread maps, and warp-level operators for each
/// supported combination of layouts and math operators.
template <
    /// Shape of threadblock-scoped matrix multiply operator
    typename Shape,
    /// Shape of warp-level matrix multiply operator
    typename WarpShape,
    /// Shape of one matrix production operation (concept: GemmShape)
    typename InstructionShape,
    /// Element data type of A operand
    typename ElementA,
    /// Layout of operand A
    typename LayoutA,
    /// Element data type of B operand
    typename ElementB,
    /// Layout of operand B
    typename LayoutB,
    /// Data type of accumulator
    typename ElementC,
    /// Layout of accumulator
    typename LayoutC,
    /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
    typename OperatorClass,
    /// Number of stages
    int Stages,
    /// Complex transformation on operand A
    ComplexTransform TransformA,
    /// Complex transformation on operand B
    ComplexTransform TransformB,
    /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
    typename Operator = arch::OpMultiplyAddComplex,
    /// Cache operation of operand A
    cutlass::arch::CacheOperation::Kind CacheOpA =
        cutlass::arch::CacheOperation::Global,
    /// Cache operation of operand B
    cutlass::arch::CacheOperation::Kind CacheOpB =
        cutlass::arch::CacheOperation::Global>
struct DefaultMultistageMmaComplexCore;
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h",
"repo_id": "cutlass",
"token_count": 1405
} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_planar_complex_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
///
/// Threadblock-scoped, software-pipelined (double-buffered) matrix
/// multiply-accumulate over planar-complex operands. "Planar complex" means
/// all real-valued elements are stored contiguously, followed by all imaginary
/// elements; in shared memory the imaginary plane of each operand tile lives
/// at a fixed pointer offset (SharedStorage::kImaginaryStrideA/B) from the
/// real plane. Real-valued warp-level MMA operators are then used to compute
/// the four partial products of each complex multiply.
template <
  /// Size of the Gemm problem - concept: gemm::GemmShape<>
  typename Shape_,
  /// Iterates over tiles of A operand in global memory
  //  (concept: ReadableTileIterator | ForwardTileIterator |
  //  MaskedTileIterator)
  typename IteratorA_,
  /// Iterates over tiles of A operand in shared memory
  /// (concept: WriteableTileIterator | RandomAccessTileIterator)
  typename SmemIteratorA_,
  /// Iterates over tiles of B operand in global memory
  //  (concept: ReadableTileIterator | ForwardTileIterator |
  //  MaskedTileIterator)
  typename IteratorB_,
  /// Iterates over tiles of B operand in shared memory
  /// (concept: WriteableTileIterator | RandomAccessTileIterator)
  typename SmemIteratorB_,
  /// Data type of accumulator matrix
  typename ElementC_,
  /// Layout type of accumulator matrix
  typename LayoutC_,
  /// Policy describing tuning details (concept: MmaPolicy)
  typename Policy_,
  /// Number of stages,
  int Stages,
  /// Transformation applied to A (conjugation applied at compile time)
  ComplexTransform TransformA = ComplexTransform::kNone,
  /// Transformation applied to B (conjugation applied at compile time)
  ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplexPipelined :
  public MmaPlanarComplexBase<Shape_, Policy_, Stages> {
public:

  ///< Base class
  using Base = MmaPlanarComplexBase<Shape_, Policy_, Stages>;

  ///< Size of the Gemm problem - concept: gemm::GemmShape<>
  using Shape = Shape_;

  ///< Iterates over tiles of A operand in global memory
  using IteratorA = IteratorA_;

  ///< Iterates over tiles of B operand in global memory
  using IteratorB = IteratorB_;

  ///< Data type of accumulator matrix
  using ElementC = ElementC_;

  ///< Layout of accumulator matrix
  using LayoutC = LayoutC_;

  ///< Policy describing tuning details
  using Policy = Policy_;

  ///< Architecture tag of the underlying warp-level operator
  using ArchTag = typename Policy::Operator::ArchTag;

  ///< Iterator writing threadblock-scoped tiles of A to shared memory
  using SmemIteratorA = SmemIteratorA_;

  ///< Iterator writing threadblock-scoped tiles of B to shared memory
  using SmemIteratorB = SmemIteratorB_;

  /// Transformation applied to A
  static ComplexTransform const kTransformA = TransformA;

  /// Transformation applied to B
  static ComplexTransform const kTransformB = TransformB;

  //
  // Dependent types
  //

  /// Fragment of accumulator tile: planar-complex pair of real-valued
  /// warp-level accumulator fragments
  using FragmentC = ArrayPlanarComplex<
    typename Policy::Operator::FragmentC::Element,
    Policy::Operator::FragmentC::kElements
  >;

  /// Warp-level Mma
  using Operator = typename Policy::Operator;

private:

  // Threadblock-scoped fragments holding one global-memory tile per plane
  using FragmentA = typename IteratorA::Fragment;
  using FragmentB = typename IteratorB::Fragment;

  // Warp-scoped fragments loaded from shared memory
  using WarpFragmentA = typename Operator::FragmentA;
  using WarpFragmentB = typename Operator::FragmentB;

private:

  //
  // Data members
  //

  /// Iterator to write threadblock-scoped tile of A operand to shared memory
  SmemIteratorA smem_iterator_A_;

  /// Iterator to write threadblock-scoped tile of B operand to shared memory
  SmemIteratorB smem_iterator_B_;

public:

  /// Construct from tensor references
  CUTLASS_DEVICE
  MmaPlanarComplexPipelined(
    ///< Shared storage needed for internal use by threadblock-scoped GEMM
    typename Base::SharedStorage &shared_storage,
    ///< ID within the threadblock
    int thread_idx,
    ///< ID of warp
    int warp_idx,
    ///< ID of each thread within a warp
    int lane_idx
  ):
    Base(shared_storage, thread_idx, warp_idx, lane_idx),
    smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
    smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
  {

    // Compute warp location within threadblock tile by mapping the warp_id to
    // three coordinates:
    //   _m: the warp's position within the threadblock along the M dimension
    //   _n: the warp's position within the threadblock along the N dimension
    //   _k: the warp's position within the threadblock along the K dimension

    int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
    int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);

    int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
    int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;

    // Add per-warp offsets in units of warp-level tiles
    this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
    this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
  }

private:

  /// Accumulates the four real-valued partial products forming one complex
  /// multiply-accumulate:
  ///
  ///   accum.real += A.real * B.real  -/+  A.imag * B.imag
  ///   accum.imag += A.real * B.imag  +/-  A.imag * B.real
  ///
  /// Signs are selected at compile time from kTransformA / kTransformB
  /// (conjugation negates the corresponding imaginary operand). The negated
  /// fragments are computed unconditionally; unused ones are eliminated by the
  /// compiler since the transform selections are compile-time constants.
  CUTLASS_DEVICE
  void warp_mma_planar_complex(
    Operator & warp_mma,
    FragmentC &accum,
    WarpFragmentA const & real_A,
    WarpFragmentA const & imag_A,
    WarpFragmentB const & real_B,
    WarpFragmentB const & imag_B) {

    cutlass::negate<Array<typename WarpFragmentB::Element, WarpFragmentB::kElements>> neg_op_B;

    WarpFragmentB neg_real_B = neg_op_B(real_B);
    WarpFragmentB neg_imag_B = neg_op_B(imag_B);

    // accum.real += a.real * b.real (always with positive sign)
    warp_mma(accum.real, real_A, real_B, accum.real);

    // accum.imag += a.real * b.imag (conjugating B negates b.imag)
    if (kTransformB == ComplexTransform::kNone) {
      warp_mma(accum.imag, real_A, imag_B, accum.imag);
    }
    else {
      warp_mma(accum.imag, real_A, neg_imag_B, accum.imag);
    }

    // accum.imag += a.imag * b.real (conjugating A negates a.imag; the sign is
    // folded into B since only B fragments are negated here)
    if (kTransformA == ComplexTransform::kNone) {
      warp_mma(accum.imag, imag_A, real_B, accum.imag);
    }
    else {
      warp_mma(accum.imag, imag_A, neg_real_B, accum.imag);
    }

    // accum.real += -/+ a.imag * b.imag: positive iff exactly one operand is
    // conjugated (XOR of the two transform selections)
    if (kTransformA == ComplexTransform::kNone ^ kTransformB == ComplexTransform::kNone) {
      warp_mma(accum.real, imag_A, imag_B, accum.real);
    }
    else {
      warp_mma(accum.real, imag_A, neg_imag_B, accum.real);
    }
  }

public:

  /// Perform a threadblock-scoped matrix multiply-accumulate
  CUTLASS_DEVICE
  void operator()(
    ///< problem size of GEMM
    int gemm_k_iterations,
    ///< destination accumulator tile
    FragmentC &accum,
    ///< iterator over A operand in global memory
    IteratorA iterator_A_real,
    ///< iterator over A operand in global memory
    IteratorA iterator_A_imag,
    ///< iterator over B operand in global memory
    IteratorB iterator_B_real,
    ///< iterator over B operand in global memory
    IteratorB iterator_B_imag,
    ///< initial value of accumulator
    FragmentC const &src_accum) {

    //
    // Prologue
    //

    // Perform accumulation in the 'd' output operand
    accum = src_accum;

    FragmentA tb_frag_A_real;
    FragmentA tb_frag_A_imag;

    FragmentB tb_frag_B_real;
    FragmentB tb_frag_B_imag;

    tb_frag_A_real.clear();
    tb_frag_A_imag.clear();

    tb_frag_B_real.clear();
    tb_frag_B_imag.clear();

    // The last kblock is loaded in the prolog
    iterator_A_real.load(tb_frag_A_real);
    iterator_A_imag.load(tb_frag_A_imag);

    iterator_B_real.load(tb_frag_B_real);
    iterator_B_imag.load(tb_frag_B_imag);

    ++iterator_A_real;
    ++iterator_A_imag;
    ++iterator_B_real;
    ++iterator_B_imag;

    // Real planes are stored at the iterator's base; imaginary planes are
    // stored at a fixed pointer offset into the same shared-memory allocation.
    this->smem_iterator_A_.store(tb_frag_A_real);
    this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA);

    this->smem_iterator_B_.store(tb_frag_B_real);
    this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB);

    ++this->smem_iterator_A_;
    ++this->smem_iterator_B_;

    __syncthreads();

    // Pair of fragments used to overlap shared memory loads and math instructions
    WarpFragmentA warp_frag_real_A[2];
    WarpFragmentA warp_frag_imag_A[2];

    WarpFragmentB warp_frag_real_B[2];
    WarpFragmentB warp_frag_imag_B[2];

    this->warp_tile_iterator_A_.set_kgroup_index(0);
    this->warp_tile_iterator_B_.set_kgroup_index(0);

    this->warp_tile_iterator_A_.load(warp_frag_real_A[0]);
    this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA);

    this->warp_tile_iterator_B_.load(warp_frag_real_B[0]);
    this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB);

    ++this->warp_tile_iterator_A_;
    ++this->warp_tile_iterator_B_;

    Operator warp_mma;

    // Index of the shared-memory stage the *next* global-memory tile will be
    // written to; toggles between 0 and 1 (two-stage double buffer).
    int smem_write_stage_idx = 1;

    // Avoid reading out of bounds
    iterator_A_real.clear_mask(gemm_k_iterations <= 1);
    iterator_A_imag.clear_mask(gemm_k_iterations <= 1);
    iterator_B_real.clear_mask(gemm_k_iterations <= 1);
    iterator_B_imag.clear_mask(gemm_k_iterations <= 1);

    // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
    // shared memory loads (which have the tightest latency requirement).

    //
    // Mainloop
    //

    // Note: The main loop does not support Base::kWarpGemmIterations == 2.
    CUTLASS_GEMM_LOOP
    for (; gemm_k_iterations > 0; --gemm_k_iterations) {
      //
      // Loop over GEMM K dimension
      //

      CUTLASS_PRAGMA_UNROLL
      for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {

        // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
        // as the case may be.

        if (warp_mma_k == Base::kWarpGemmIterations - 1) {

          // Write fragments to shared memory
          this->smem_iterator_A_.store(tb_frag_A_real);
          this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA);

          this->smem_iterator_B_.store(tb_frag_B_real);
          this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB);

          __syncthreads();

          ++this->smem_iterator_B_;
          ++this->smem_iterator_A_;

          // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory.
          // The write iterators wrap on one toggle of smem_write_stage_idx and
          // the warp-level read iterators wrap on the other, keeping readers
          // one stage behind writers.
          if (smem_write_stage_idx == 1) {
            this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
            this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
          }
          else {
            this->warp_tile_iterator_A_.add_tile_offset(
                {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
            this->warp_tile_iterator_B_.add_tile_offset(
                {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations,
                 0});
          }

          smem_write_stage_idx ^= 1;
        }

        this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
        this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);

        this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]);
        this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA);

        this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]);
        this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB);

        ++this->warp_tile_iterator_A_;
        ++this->warp_tile_iterator_B_;

        if (warp_mma_k == 0) {

          // Fetch the next threadblock-scoped tile from global memory while
          // math instructions cover the latency.
          iterator_A_real.load(tb_frag_A_real);
          iterator_A_imag.load(tb_frag_A_imag);

          iterator_B_real.load(tb_frag_B_real);
          iterator_B_imag.load(tb_frag_B_imag);

          ++iterator_A_real;
          ++iterator_A_imag;
          ++iterator_B_real;
          ++iterator_B_imag;

          // Avoid reading out of bounds if this was the last loop iteration
          iterator_A_real.clear_mask(gemm_k_iterations <= 2);
          iterator_A_imag.clear_mask(gemm_k_iterations <= 2);
          iterator_B_real.clear_mask(gemm_k_iterations <= 2);
          iterator_B_imag.clear_mask(gemm_k_iterations <= 2);
        }

        warp_mma_planar_complex(
          warp_mma,
          accum,
          warp_frag_real_A[warp_mma_k % 2],
          warp_frag_imag_A[warp_mma_k % 2],
          warp_frag_real_B[warp_mma_k % 2],
          warp_frag_imag_B[warp_mma_k % 2]);
      }
    }
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h",
"repo_id": "cutlass",
"token_count": 5667
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/functional.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Unpacks a fragment of complex<RealElement> values, converts real and
/// imaginary parts to the destination element type with the requested
/// rounding, and packs them into planar real/imaginary fragments in the layout
/// expected by the real-valued mma operation. Primary template; partial
/// specializations for Operand::kA and Operand::kB follow.
template <
    /// Data type of real & imag members of complex numbers in the SourceFragment
    typename RealElement,
    /// Destination fragment required by the mma operation
    typename DestinationFragment,
    /// Source fragment holding complex<RealElement> elements
    typename SourceFragment,
    /// Number of mma operations performed
    typename MmaIterations,
    /// Shape of operand elements
    typename MmaOperandShape,
    /// Complex transform on A operand
    ComplexTransform Transform_,
    /// Operand A or Operand B
    Operand Operand_,
    /// Floating-point rounding style
    FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma;
// Partial specialization for OperandA and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kA,
Round_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kA;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRound = Round_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
// Numeric convertor MmaElement <= RealElement
using Converter = NumericConverter<MmaElement, RealElement, kRound>;
// Operand layout parameters
using SourceFragmentLayout = layout::ColumnMajor;
static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMma() {}
CUTLASS_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kRow; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r + i * MmaOperandShape::kRow;
int col = c;
// Access complex<RealElement> and apply rounding on real and imag parts
MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real());
MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest[i][pos] = a;
dest[i+MmaIterations::kRow][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b);
}
}
}
}
};
// Partial specialization for OperandB and Congruous smem layout
//
// Unpacks a row-major fragment of complex<RealElement> values, rounds the
// real and imaginary parts to MmaElement, and writes them into planar
// destination fragments: dest[0 .. kColumn-1] receive the real parts and
// dest[kColumn .. 2*kColumn-1] the (possibly conjugated) imaginary parts.
template <
  typename RealElement,
  typename DestinationFragment,
  typename SourceFragment,
  typename MmaIterations,
  typename MmaOperandShape,
  ComplexTransform Transform_,
  FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma <
  RealElement,
  DestinationFragment,
  SourceFragment,
  MmaIterations,
  MmaOperandShape,
  Transform_,
  Operand::kB,
  Round_> {

  //
  // Type definitions
  //
  static Operand const kOperand = Operand::kB;
  static ComplexTransform const kTransform = Transform_;
  static FloatRoundStyle const kRound = Round_;

  // Data type of elements in the destination fragment
  using MmaElement = typename DestinationFragment::Element;

  // Numeric convertor MmaElement <= RealElement
  using Converter = NumericConverter<MmaElement, RealElement, kRound>;

  // Operand layout parameters
  using SourceFragmentLayout = layout::RowMajor;
  static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn;

  /// Ctor
  CUTLASS_DEVICE
  UnpackComplexConvertAndPackForMma() {}

  /// Unpacks, converts, and packs the source fragment.
  ///
  /// \param dest    array of 2 * MmaIterations::kColumn destination fragments
  ///                (real plane first, followed by the imaginary plane)
  /// \param source  fragment of complex<RealElement> in row-major order
  CUTLASS_HOST_DEVICE
  void operator()(DestinationFragment *dest, SourceFragment const &source) {

    Converter convert_op;
    SourceFragmentLayout layout(kLdm);

    CUTLASS_PRAGMA_UNROLL
    for(int i=0; i<MmaIterations::kColumn; i++) {
      int pos = 0;
      CUTLASS_PRAGMA_UNROLL
      for(int c=0; c<MmaOperandShape::kColumn; c++) {
        CUTLASS_PRAGMA_UNROLL
        for(int r=0; r<MmaOperandShape::kRow; r++) {
          // Logical position of element in source fragment
          int row = r;
          int col = c + i * MmaOperandShape::kColumn;

          // Access complex<RealElement> apply rounding on real and imag parts
          MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real());
          MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag());

          // Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
          dest[i][pos] = a;
          dest[i+MmaIterations::kColumn][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b);
        }
      }
    }
  }
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Warp-level complex-valued matrix multiply-accumulate built on real-valued
/// Tensor Core operations. Primary template; partial specializations (by
/// operand data type and transform) follow.
template <
  /// Size of the Gemm problem - concept: gemm::GemmShape<>
  typename Shape_,
  /// Data type of A elements
  typename RealElementA,
  /// Layout of A matrix (concept: MatrixLayout)
  typename LayoutA_,
  /// Data type of B elements
  typename RealElementB,
  /// Layout of B matrix (concept: MatrixLayout)
  typename LayoutB_,
  /// Element type of C matrix
  typename RealElementC,
  /// Layout of C matrix (concept: MatrixLayout)
  typename LayoutC_,
  /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
  typename Policy_,
  /// Complex transform on A operand
  ComplexTransform TransformA = ComplexTransform::kNone,
  /// Complex transform on B operand
  ComplexTransform TransformB = ComplexTransform::kNone,
  /// Do source operands need more than one elements
  bool GeneralizedOperatorElements = false,
  /// Used for partial specialization
  typename Enable = bool
>
class MmaComplexTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected planar complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
  /// Performs a warp-level matrix multiply-accumulate operation.
  ///
  /// The complex product D = A * B + C is decomposed into four real-valued
  /// passes over the planar-complex accumulator fragment. Each (m, n)
  /// accumulator is addressed as (m + n * MmaIterations::kRow); the imaginary
  /// plane lives MmaIterations::kCount accumulators past the real plane.
  ///
  ///   1) accum.real += a.real * b.real
  ///   2) accum.imag += a.real * b.imag
  ///   3) accum.real += (-a.imag) * b.imag
  ///   4) accum.imag += a.imag * b.real
  ///
  /// Conjugation requested via kTransformA / kTransformB is applied by
  /// negating the corresponding imaginary operand when packing.
  CUTLASS_DEVICE
  void operator()(
    FragmentC &D,
    FragmentA const &A,
    FragmentB const &B,
    FragmentC const &C
  ) const {
    // Alias types for underlying real-valued matrix multiply operator
    using MmaOperandA = typename ArchMmaOperator::FragmentA;
    using MmaOperandB = typename ArchMmaOperator::FragmentB;
    using MmaOperandC = typename ArchMmaOperator::FragmentC;
    static_assert(MmaOperandA::kElements == 1,
      "This implementation only supports math instructions in which exactly one element is needed for the A operand."
      "We can geneneralize later.");
    static_assert(MmaOperandB::kElements == 1,
      "This implementation only supports math instructions in which exactly one element is needed for the B operand."
      "We can geneneralize later.");
    // Accumulate in place on top of the addend.
    D = C;
    CUTLASS_PRAGMA_UNROLL
    for (int m = 0; m < MmaIterations::kRow; ++m) {
      // Pass 1: mma(accum.real(), a.real(), b.real(), accum.real());
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < MmaIterations::kColumn; ++n) {
        // Pack operands together. This may result in actual MOVs
        MmaOperandA operand_A;
        MmaOperandB operand_B;
        operand_A[0] = A[m].real();
        operand_B[0] = B[n].real();
        // Real-valued accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow);
        mma(*accum, operand_A, operand_B, *accum);
      }
      // Pass 2: mma(accum.imag(), a.real(), b.imag(), accum.imag());
      // Iterated in reverse order; results are unaffected since each (m, n)
      // pair addresses a distinct accumulator.
      CUTLASS_PRAGMA_UNROLL
      for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
        // Pack operands together. This may result in actual MOVs
        MmaOperandA operand_A;
        MmaOperandB operand_B;
        operand_A[0] = A[m].real();
        // Conjugating B negates its imaginary part.
        operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag());
        // Complex-valued (imaginary-plane) accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow) + MmaIterations::kCount;
        mma(*accum, operand_A, operand_B, *accum);
      }
      // Pass 3: mma(accum.real(), -a.imag(), b.imag(), accum.real())
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < MmaIterations::kColumn; ++n) {
        // Pack operands together. This may result in actual MOVs
        MmaOperandA operand_A;
        MmaOperandB operand_B;
        // A imaginary part is intentionally negated; conjugating A cancels
        // the negation (conj negates imag, and this pass needs -imag).
        operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? A[m].imag() : -A[m].imag());
        operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag());
        // Real-valued accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow);
        mma(*accum, operand_A, operand_B, *accum);
      }
      // Pass 4: mma(accum.imag(), a.imag(), b.real(), accum.imag())
      CUTLASS_PRAGMA_UNROLL
      for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
        // Pack operands together. This may result in actual MOVs
        MmaOperandA operand_A;
        MmaOperandB operand_B;
        operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? -A[m].imag() : A[m].imag());
        operand_B[0] = B[n].real();
        // Complex-valued (imaginary-plane) accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow) + MmaIterations::kCount;
        mma(*accum, operand_A, operand_B, *accum);
      }
    }
  }
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<float>
//  Rounding: float -> tfloat32_t (round_half_ulp_truncate; round-to-nearest on SM90+)
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
  /// Size of the Gemm problem - concept: gemm::GemmShape<>
  typename Shape_,
  /// Layout of A matrix (concept: MatrixLayout)
  typename LayoutA_,
  /// Layout of B matrix (concept: MatrixLayout)
  typename LayoutB_,
  /// Layout of C matrix (concept: MatrixLayout)
  typename LayoutC_,
  /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
  typename Policy_,
  /// Complex transform on A operand
  ComplexTransform TransformA,
  /// Complex transform on B operand
  ComplexTransform TransformB
>
class MmaComplexTensorOp<
  Shape_,
  complex<float>,
  LayoutA_,
  complex<float>,
  LayoutB_,
  complex<float>,
  LayoutC_,
  Policy_,
  TransformA,
  TransformB> {
public:
  /// Shape of warp-level matrix operation (concept: GemmShape)
  using Shape = Shape_;
  /// Data type of members of complex multiplicand A
  using RealElementA = float;
  /// Data type of multiplicand A
  using ElementA = complex<RealElementA>;
  /// Layout of multiplicand A
  using LayoutA = LayoutA_;
  /// Data type of members of complex multiplicand B
  using RealElementB = float;
  /// Data type of multiplicand B
  using ElementB = complex<RealElementB>;
  /// Layout of multiplicand B
  using LayoutB = LayoutB_;
  /// Data type of members of complex accumulator matrix C
  using RealElementC = float;
  /// Data type of accumulator matrix C
  using ElementC = complex<RealElementC>;
  /// Layout of accumulator matrix C
  using LayoutC = LayoutC_;
  /// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
  using Policy = Policy_;
  /// Underlying matrix multiply operator (concept: arch::Mma)
  using ArchMmaOperator = typename Policy::Operator;
  /// Shape of underlying instruction
  using InstructionShape = typename ArchMmaOperator::Shape;
  /// Underlying arch tag
  using ArchTag = typename ArchMmaOperator::ArchTag;
  /// Indicates class of matrix operator
  using OperatorClass = arch::OpClassTensorOp;
  /// Indicates math operator
  using MathOperator = typename arch::OpMultiplyAddComplex;
  /// Complex transform on A operand
  static ComplexTransform const kTransformA = TransformA;
  /// Complex transform on B operand
  static ComplexTransform const kTransformB = TransformB;
  /// Number of threads participating in warp-level matrix product
  static int const kThreadCount = 32;
public:
  /// Iterates over the A operand in memory
  using IteratorA = MmaTensorOpMultiplicandTileIterator<
     MatrixShape<Shape::kM, Shape::kK>,
     Operand::kA,
     ElementA,
     LayoutA,
     MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
     Policy::OpDelta::kRow,
     32,
     1
  >;
  /// Storage for A tile
  using FragmentA = typename IteratorA::Fragment;
  /// Storage for transformed A tile: real-valued (tf32) instruction operands
  /// unpacked into planar form (all real elements followed by all imaginary
  /// elements), hence twice the element count of FragmentA.
  using TransformedFragmentA =
      Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2>;
  /// Iterates over the B operand in memory
  using IteratorB = MmaTensorOpMultiplicandTileIterator<
     MatrixShape<Shape::kK, Shape::kN>,
     Operand::kB,
     ElementB,
     LayoutB,
     MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
     Policy::OpDelta::kColumn,
     32,
     1
  >;
  /// Storage for B tile
  using FragmentB = typename IteratorB::Fragment;
  /// Storage for transformed B tile (planar form, see TransformedFragmentA)
  using TransformedFragmentB =
      Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2>;
  // Warp-level shape must tile evenly with the underlying instruction shape.
  static_assert(
    !(Shape::kM % ArchMmaOperator::Shape::kM) &&
    !(Shape::kN % ArchMmaOperator::Shape::kN),
    "Shape of warp-level Mma must be divisible by operator shape.");
  /// Number of complex products operations performed (one complex product needs four mma instructions)
  using MmaIterations = MatrixShape<
    Shape::kM / ArchMmaOperator::Shape::kM,
    Shape::kN / ArchMmaOperator::Shape::kN
  >;
  /// Iterates over the C operand in memory
  using IteratorC = MmaTensorOpAccumulatorTileIterator<
     MatrixShape<Shape::kM, Shape::kN>,
     ElementC,
     LayoutC,
     typename ArchMmaOperator::Shape,
     typename Policy::OpDelta>;
  /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
  /// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
  /// parts are stored consecutively followed by all imaginary parts. This matches the structure
  /// of Tensor Cores which are always real-valued matrix multiplies.
  using FragmentC = typename IteratorC::Fragment;
private:
  //
  // Data members
  //
  /// Underlying real-valued matrix multiply operator (concept: arch::Mma)
  ArchMmaOperator mma;
public:
  //
  // Methods
  //
  /// Ctor
  CUTLASS_DEVICE
  MmaComplexTensorOp() {}
  /// Performs a warp-level matrix multiply-accumulate operation.
  ///
  /// Operands A and B must already be in planar instruction-operand form (see
  /// transform()): operand_A[0 .. kRow) holds real parts and
  /// operand_A[kRow .. 2*kRow) imaginary parts; operand_B is laid out the same
  /// way with kColumn. The complex product is formed by four real-valued mma
  /// passes per (m, n) accumulator; the imaginary accumulator plane is offset
  /// by MmaIterations::kCount.
  CUTLASS_DEVICE
  void operator()(
    FragmentC &D,
    TransformedFragmentA const &A,
    TransformedFragmentB const &B,
    FragmentC const &C
  ) const {
    // Alias types for underlying real-valued matrix multiply operator
    using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
    using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
    using MmaOperandC = typename ArchMmaOperator::FragmentC;
    static_assert(platform::is_same<cutlass::gemm::GemmShape<16, 8, 8>, typename ArchMmaOperator::Shape>::value,
      "This implementation only supports mma.m16n8k8 math instructions.");
    static_assert(InstMmaOperandA::kElements == 4,
      "This implementation only supports math instructions in which exactly four element is needed for the A operand."
      "We can geneneralize later.");
    static_assert(InstMmaOperandB::kElements == 2,
      "This implementation only supports math instructions in which exactly two element is needed for the B operand."
      "We can geneneralize later.");
    // Instruction Operands A & B holding real part followed by imaginary part for mma operations
    InstMmaOperandA const *operand_A = reinterpret_cast<InstMmaOperandA const *>(&A);
    InstMmaOperandB const *operand_B = reinterpret_cast<InstMmaOperandB const *>(&B);
    //
    // Accumulate in place
    //
    D = C;
    CUTLASS_PRAGMA_UNROLL
    for (int m = 0; m < MmaIterations::kRow; ++m) {
      // Pass 1: mma(accum.real(), a.real(), b.real(), accum.real());
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < MmaIterations::kColumn; ++n) {
        // Real-valued accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow);
        mma(*accum, operand_A[m], operand_B[n], *accum);
      }
      // Pass 2: mma(accum.imag(), a.real(), b.imag(), accum.imag());
      CUTLASS_PRAGMA_UNROLL
      for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
        // Complex-valued (imaginary-plane) accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow) + MmaIterations::kCount;
        mma(*accum, operand_A[m], operand_B[n+MmaIterations::kColumn], *accum);
      }
      // Pass 3: mma(accum.real(), a.imag(), -b.imag(), accum.real())
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < MmaIterations::kColumn; ++n) {
        // Negate OperandB to accumulate -(a.imag()*b.imag()).
        // Negating OperandB emits fewer instructions than negating OperandA,
        // since OperandB has fewer elements.
        negate<InstMmaOperandB> negate_op;
        // Real-valued accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow);
        mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum);
      }
      // Pass 4: mma(accum.imag(), a.imag(), b.real(), accum.imag())
      CUTLASS_PRAGMA_UNROLL
      for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
        // Complex-valued (imaginary-plane) accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow) + MmaIterations::kCount;
        mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum);
      }
    }
  }
  /// Transform the mma operands to the required types.
  ///
  /// Unpacks complex<float> fragments into planar real/imaginary tf32
  /// instruction operands, applying the requested conjugation and rounding.
  CUTLASS_DEVICE
  void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
                 FragmentA const &A, FragmentB const &B) const {
    // Alias types for underlying real-valued matrix multiply operator
    using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
    using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
    //
    // Define conversions from source type to instruction operands' type
    //
    // Rounding mode differs by target architecture; on SM90+ round-to-nearest
    // is selected, otherwise round_half_ulp_trunc_dntz.
    #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900
    FloatRoundStyle const kRoundA = FloatRoundStyle::round_to_nearest;
    FloatRoundStyle const kRoundB = FloatRoundStyle::round_to_nearest;
    #else
    FloatRoundStyle const kRoundA = FloatRoundStyle::round_half_ulp_trunc_dntz;
    FloatRoundStyle const kRoundB = FloatRoundStyle::round_half_ulp_trunc_dntz;
    #endif
    detail::UnpackComplexConvertAndPackForMma <
      RealElementA,
      InstMmaOperandA,
      FragmentA,
      MmaIterations,
      MatrixShape<2, 2>,
      kTransformA,
      Operand::kA,
      kRoundA> convert_A;
    detail::UnpackComplexConvertAndPackForMma <
      RealElementB,
      InstMmaOperandB,
      FragmentB,
      MmaIterations,
      MatrixShape<2, 1>,
      kTransformB,
      Operand::kB,
      kRoundB> convert_B;
    // Convert Fragment[A|B] holding complex<RealElement[A|B]> to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element
    convert_A(reinterpret_cast<InstMmaOperandA *>(&dst_A), A);
    convert_B(reinterpret_cast<InstMmaOperandB *>(&dst_B), B);
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<double>
// Math instruction: mma.sync.aligned.m16n8k4.f64.f64.f64.f64
// Output data type: complex<double>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
  /// Size of the Gemm problem - concept: gemm::GemmShape<>
  typename Shape_,
  /// Layout of A matrix (concept: MatrixLayout)
  typename LayoutA_,
  /// Layout of B matrix (concept: MatrixLayout)
  typename LayoutB_,
  /// Layout of C matrix (concept: MatrixLayout)
  typename LayoutC_,
  /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
  typename Policy_,
  /// Complex transform on A operand
  ComplexTransform TransformA,
  /// Complex transform on B operand
  ComplexTransform TransformB
>
class MmaComplexTensorOp<
  Shape_,
  complex<double>,
  LayoutA_,
  complex<double>,
  LayoutB_,
  complex<double>,
  LayoutC_,
  Policy_,
  TransformA,
  TransformB,
  // NOTE(review): trailing boolean selects this specialization; its meaning is
  // defined by the primary template (not visible in this chunk) -- confirm.
  true> {
public:
  /// Shape of warp-level matrix operation (concept: GemmShape)
  using Shape = Shape_;
  /// Data type of members of complex multiplicand A
  using RealElementA = double;
  /// Data type of multiplicand A
  using ElementA = complex<RealElementA>;
  /// Layout of multiplicand A
  using LayoutA = LayoutA_;
  /// Data type of members of complex multiplicand B
  using RealElementB = double;
  /// Data type of multiplicand B
  using ElementB = complex<RealElementB>;
  /// Layout of multiplicand B
  using LayoutB = LayoutB_;
  /// Data type of members of complex accumulator matrix C
  using RealElementC = double;
  /// Data type of accumulator matrix C
  using ElementC = complex<RealElementC>;
  /// Layout of accumulator matrix C
  using LayoutC = LayoutC_;
  /// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp)
  using Policy = Policy_;
  /// Underlying matrix multiply operator (concept: arch::Mma)
  using ArchMmaOperator = typename Policy::Operator;
  /// Shape of underlying instruction
  using InstructionShape = typename ArchMmaOperator::Shape;
  /// Underlying arch tag
  using ArchTag = typename ArchMmaOperator::ArchTag;
  /// Indicates class of matrix operator
  using OperatorClass = arch::OpClassTensorOp;
  /// Indicates math operator
  using MathOperator = typename arch::OpMultiplyAddComplex;
  /// Complex transform on A operand
  static ComplexTransform const kTransformA = TransformA;
  /// Complex transform on B operand
  static ComplexTransform const kTransformB = TransformB;
  /// Number of threads participating in warp-level matrix product
  static int const kThreadCount = 32;
public:
  /// Iterates over the A operand in memory
  using IteratorA = MmaTensorOpMultiplicandTileIterator<
     MatrixShape<Shape::kM, Shape::kK>,
     Operand::kA,
     ElementA,
     LayoutA,
     MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
     Policy::OpDelta::kRow,
     32,
     1
  >;
  /// Storage for A tile
  using FragmentA = typename IteratorA::Fragment;
  /// Storage for transformed A tile. No element conversion is required for
  /// double-precision operands, so the transformed type aliases FragmentA.
  using TransformedFragmentA = FragmentA;
  /// Iterates over the B operand in memory
  using IteratorB = MmaTensorOpMultiplicandTileIterator<
     MatrixShape<Shape::kK, Shape::kN>,
     Operand::kB,
     ElementB,
     LayoutB,
     MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
     Policy::OpDelta::kColumn,
     32,
     1
  >;
  /// Storage for B tile
  using FragmentB = typename IteratorB::Fragment;
  /// Storage for transformed B tile (identity transform, as for A)
  using TransformedFragmentB = FragmentB;
  // Warp-level shape must tile evenly with the underlying instruction shape.
  static_assert(
    !(Shape::kM % ArchMmaOperator::Shape::kM) &&
    !(Shape::kN % ArchMmaOperator::Shape::kN),
    "Shape of warp-level Mma must be divisible by operator shape.");
  /// Number of mma operations performed
  using MmaIterations = MatrixShape<
    Shape::kM / ArchMmaOperator::Shape::kM,
    Shape::kN / ArchMmaOperator::Shape::kN
  >;
  /// Iterates over the C operand in memory
  using IteratorC = MmaTensorOpAccumulatorTileIterator<
     MatrixShape<Shape::kM, Shape::kN>,
     ElementC,
     LayoutC,
     typename ArchMmaOperator::Shape,
     typename Policy::OpDelta>;
  /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
  /// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
  /// parts are stored consecutively followed by all imaginary parts. This matches the structure
  /// of Tensor Cores which are always real-valued matrix multiplies.
  using FragmentC = typename IteratorC::Fragment;
  // Factor of 2 accounts for the separate real and imaginary accumulator planes.
  static_assert(
    FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
    "Unexpected planar complex fragment length.");
private:
  //
  // Data members
  //
  /// Underlying real-valued matrix multiply operator (concept: arch::Mma)
  ArchMmaOperator mma;
public:
  //
  // Methods
  //
  /// Ctor
  CUTLASS_DEVICE
  MmaComplexTensorOp() {}
  /// Performs a warp-level matrix multiply-accumulate operation.
  ///
  /// The complex product D = A * B + C is decomposed into four real-valued mma
  /// passes per (m, n) accumulator (real*real, real*imag, -imag*imag,
  /// imag*real). Unlike the single-element specialization, instruction
  /// operands hold MmaOperand[A|B]::kElements values, so real/imaginary parts
  /// are gathered element-wise from the complex fragments before each mma.
  /// Conjugation (kTransformA/kTransformB) negates the relevant imaginary
  /// parts at packing time. The imaginary accumulator plane is offset by
  /// MmaIterations::kCount.
  CUTLASS_DEVICE
  void operator()(
    FragmentC &D,
    FragmentA const &A,
    FragmentB const &B,
    FragmentC const &C
  ) const {
    // Alias types for underlying real-valued matrix multiply operator
    using MmaOperandA = typename ArchMmaOperator::FragmentA;
    using MmaOperandB = typename ArchMmaOperator::FragmentB;
    using MmaOperandC = typename ArchMmaOperator::FragmentC;
    // Accumulate in place on top of the addend.
    D = C;
    CUTLASS_PRAGMA_UNROLL
    for (int m = 0; m < MmaIterations::kRow; ++m) {
      // Pass 1: mma(accum.real(), a.real(), b.real(), accum.real());
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < MmaIterations::kColumn; ++n) {
        // Pack operands together. This may result in actual MOVs
        MmaOperandA operand_A;
        MmaOperandB operand_B;
        CUTLASS_PRAGMA_UNROLL
        for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
          operand_A[mk] = A[m*MmaOperandA::kElements + mk].real();
        CUTLASS_PRAGMA_UNROLL
        for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
          operand_B[nk] = B[n*MmaOperandB::kElements + nk].real();
        // Real-valued accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow);
        mma(*accum, operand_A, operand_B, *accum);
      }
      // Pass 2: mma(accum.imag(), a.real(), b.imag(), accum.imag());
      CUTLASS_PRAGMA_UNROLL
      for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
        // Pack operands together. This may result in actual MOVs
        MmaOperandA operand_A;
        MmaOperandB operand_B;
        CUTLASS_PRAGMA_UNROLL
        for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
          operand_A[mk] = A[m*MmaOperandA::kElements + mk].real();
        // Conjugating B negates its imaginary part.
        CUTLASS_PRAGMA_UNROLL
        for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
          operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ?
            -B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag());
        // Complex-valued (imaginary-plane) accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow) + MmaIterations::kCount;
        mma(*accum, operand_A, operand_B, *accum);
      }
      // Pass 3: mma(accum.real(), -a.imag(), b.imag(), accum.real())
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < MmaIterations::kColumn; ++n) {
        // Pack operands together. This may result in actual MOVs
        MmaOperandA operand_A;
        MmaOperandB operand_B;
        // A imaginary part is intentionally negated; conjugating A cancels
        // the negation (conj negates imag, and this pass needs -imag).
        CUTLASS_PRAGMA_UNROLL
        for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
          operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ?
            A[m*MmaOperandA::kElements + mk].imag() : -A[m*MmaOperandA::kElements + mk].imag());
        CUTLASS_PRAGMA_UNROLL
        for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
          operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ?
            -B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag());
        // Real-valued accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow);
        mma(*accum, operand_A, operand_B, *accum);
      }
      // Pass 4: mma(accum.imag(), a.imag(), b.real(), accum.imag())
      CUTLASS_PRAGMA_UNROLL
      for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
        // Pack operands together. This may result in actual MOVs
        MmaOperandA operand_A;
        MmaOperandB operand_B;
        CUTLASS_PRAGMA_UNROLL
        for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
          operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ?
            -A[m*MmaOperandA::kElements + mk].imag() : A[m*MmaOperandA::kElements + mk].imag());
        CUTLASS_PRAGMA_UNROLL
        for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
          operand_B[nk] = B[n*MmaOperandB::kElements + nk].real();
        // Complex-valued (imaginary-plane) accumulator part
        MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
          (m + n * MmaIterations::kRow) + MmaIterations::kCount;
        mma(*accum, operand_A, operand_B, *accum);
      }
    }
  }
  /// Transform the mma operands to the required types.
  /// Identity for this specialization: the fragments are copied through
  /// unchanged (operand packing happens inside operator()).
  CUTLASS_DEVICE
  void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
                 FragmentA const &A, FragmentB const &B) const {
    dst_A = A;
    dst_B = B;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op.h",
"repo_id": "cutlass",
"token_count": 13631
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/// Tile access iterator
///
/// Each iteration's access in the tile is used as a multiplicand for one
/// warp-level matrix multiplication.
template <
  /// Size of the tile (concept: MatrixShape)
  typename Shape_,
  /// Operand identity
  Operand Operand_,
  /// Data type of A elements
  typename Element_,
  /// Layout of operand
  typename Layout_,
  /// Shape of one matrix product operation (concept: MatrixShape)
  typename InstructionShape_,
  /// Delta between *MMA operations (in units of *MMA operations, concept:
  /// MatrixShape)
  int OpDelta_,
  /// Number of threads participating in one matrix operation
  int Threads = 32,
  /// Enable Residual Support
  bool EnableResidual = false,
  /// Number of partitions along K dimension
  int PartitionsK_ = 1
>
class MmaTensorOpMultiplicandTileAccessIterator {
 public:
  /// Shape of tile to load (concept: MatrixShape)
  using Shape = Shape_;
  /// Operand tag
  static Operand const kOperand = Operand_;
  /// Basic check
  static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
    "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
  /// Element type
  using Element = Element_;
  /// Layout of source tile
  using Layout = Layout_;
  /// Shape of one matrix product operation (concept: MatrixShape)
  using InstructionShape = InstructionShape_;
  /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
  static int const kOpDelta = OpDelta_;
  /// Number of participating threads
  /// NOTE(review): fixed at 32 and does not use the Threads template
  /// parameter -- confirm this is intentional.
  static int const kThreads = 32;
  /// TensorRef type for loading element from a tensor
  using TensorRef = TensorRef<Element, Layout>;
  /// Index type
  using Index = typename TensorRef::Index;
  /// Long Index type
  using LongIndex = typename TensorRef::LongIndex;
  /// Coordinate for an element in the tensor
  using TensorCoord = typename TensorRef::TensorCoord;
  /// Number of elements accessed per Shared Memory load: pack a full 32-bit
  /// access for sub-32-bit element types, otherwise one element at a time.
  static int const kElementsPerAccess =
      (sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);
  /// Number of instruction-shaped sub-tiles in the tile, per dimension
  using InstructionCount = MatrixShape<
    Shape::kRow / InstructionShape::kRow,
    Shape::kColumn / InstructionShape::kColumn
  >;
  /// Number of ++ iterations before the iterator advances to the next tile:
  /// operand A steps through instruction columns (K); operand B through rows (K).
  static int const kIterations = (kOperand == Operand::kA) ?
    InstructionCount::kColumn : InstructionCount::kRow;
 public:
  //
  // Derived quantities
  //
  /// Fragment object holding a thread's part of a tile
  using Fragment = Array<
    Element,
    (kOperand == Operand::kA) ?
      (Shape::kRow * InstructionShape::kColumn / kThreads) :
      (Shape::kColumn * InstructionShape::kRow / kThreads)
  >;
  /// Memory access type
  using AccessType = AlignedArray<Element, kElementsPerAccess>;
 private:
  /// Underlying tensor reference
  TensorRef ref_;
  /// Extent of tensor
  MatrixCoord extent_;
  /// Origin
  MatrixCoord origin_;
  /// Used to load residual tile
  bool is_residual_;
  /// residual offset of each thread
  TensorCoord residual_offset_;
  /// Iterations in a tile
  int iterations_;
 public:
  /// Constructor from TensorRef
  ///
  /// Computes this lane's origin within the tile from lane_id (8 x 4-thread
  /// access pattern, see load()) and, when EnableResidual is set, the offset
  /// of the partial (residual) tile along the K dimension.
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileAccessIterator(
    TensorRef const &ref,
    TensorCoord extent,
    int lane_id
  ): ref_(ref), extent_(extent), is_residual_(false), iterations_(0) {
    if (kOperand == Operand::kA) {
      origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
    }
    else {
      origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
    }
    ref_.add_coord_offset(origin_);
    if(EnableResidual) {
      // compute residual offset
      if (kOperand == Operand::kA) {
        typename TensorCoord::Index residual_size =
          extent_.column() % Shape::kColumn;
        if(residual_size) {
          is_residual_ = true;
          residual_offset_ = make_Coord(0, residual_size);
        }
      }
      else {
        typename TensorCoord::Index residual_size =
          extent_.row() % Shape::kRow;
        if(residual_size) {
          is_residual_ = true;
          residual_offset_ = make_Coord(residual_size, 0);
        }
      }
    }
  }
  /// Constructor from TensorRef; extent defaults to exactly one tile, which
  /// disables the residual path.
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileAccessIterator(
    TensorRef const &ref,
    int lane_id
  ): MmaTensorOpMultiplicandTileAccessIterator(ref,
    {Shape::kRow, Shape::kColumn}, lane_id) {
  }
  /// Advances an iterator along logical dimensions of matrix in units of whole tiles
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileAccessIterator &add_tile_offset(TensorCoord const &tile_offset) {
    TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
    origin_ += coord_offset;
    ref_.add_coord_offset(coord_offset);
    return *this;
  }
  /// Advances the iterator along the advance dimension (one tile in K).
  /// If a residual tile is pending, it is consumed first instead of stepping
  /// a full tile.
  CUTLASS_DEVICE
  void advance() {
    if(EnableResidual && is_residual_) {
      is_residual_ = false;
      origin_ += residual_offset_;
      ref_.add_coord_offset(residual_offset_);
    }
    else {
      if (kOperand == Operand::kA) {
        add_tile_offset({0, 1});
      }
      else {
        add_tile_offset({1, 0});
      }
    }
    iterations_ = 0;
  }
  /// increase iterations in a tile; wraps to the next tile via advance()
  /// after kIterations steps
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileAccessIterator & operator++() {
    iterations_++;
    if(iterations_ >= kIterations)
      advance();
    return *this;
  }
  /// Loads a fragment from memory at the location pointed to by the iterator.
  ///
  /// NOTE(review): out-of-bounds predication against extent_ is present but
  /// commented out below; loads are currently unguarded -- confirm callers
  /// guarantee in-bounds access (e.g. via the residual mechanism).
  CUTLASS_HOST_DEVICE
  void load(Fragment &frag) const {
    int const kWarpShapeDivisibleInner =
      (kOperand == Operand::kA ? InstructionShape::kColumn : InstructionShape::kRow);
    // Take advantage of Tensor Op's 8 x 4T access pattern
    int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
    AccessType *access_ptr = reinterpret_cast<AccessType *>(&frag);
    if (kOperand == Operand::kA) {
      int const kTilesPerInstruction = InstructionShape::kRow / 8;
      CUTLASS_PRAGMA_UNROLL
      for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
        CUTLASS_PRAGMA_UNROLL
        for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
          CUTLASS_PRAGMA_UNROLL
          for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) {
            int access_idx =
              access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);
            MatrixCoord offset(
              access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
              inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kColumn);
            MatrixCoord access_coord = origin_ + offset;
            // if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) {
            access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
              ref_.data() + ref_.offset(offset));
            // }
            // else {
            //   AccessType zero;
            //   zero.clear();
            //   access_ptr[access_idx] = zero;
            // }
          }
        }
      }
    }
    else {
      CUTLASS_PRAGMA_UNROLL
      for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
        CUTLASS_PRAGMA_UNROLL
        for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
          int access_idx = inner_idx + kAccessesInner * inst_n_idx;
          MatrixCoord offset(
            inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kRow,
            inst_n_idx * 8);
          MatrixCoord access_coord = origin_ + offset;
          // if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) {
          access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
            ref_.data() + ref_.offset(offset));
          // }
          // else {
          //   AccessType zero;
          //   zero.clear();
          //   access_ptr[access_idx] = zero;
          // }
        }
      }
    }
  }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h/0 | {
"file_path": "cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h",
"repo_id": "cutlass",
"token_count": 4051
} | 45 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic thread level reduction with specializations for Array<T, N>.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
#include "cutlass/functional.h"
namespace cutlass {
namespace reduction {
namespace thread {
/// Structure to compute the thread level reduction
// Primary template is declared but never defined; only the partial
// specializations below provide implementations.
template <typename Op, typename T>
struct Reduce;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization of Reduce for "plus" (a functional operator)
template <typename T>
struct Reduce< plus<T>, T > {

  /// Returns the sum of two scalars by delegating to the functional plus<T> operator.
  CUTLASS_HOST_DEVICE
  T operator()(T lhs, T const &rhs) const {
    return plus<T>{}(lhs, rhs);
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization of Reduce for Array<T, N>
template <typename T, int N>
struct Reduce < plus<T>, Array<T, N>> {

  /// Sums all N elements of 'in' and returns the total as a one-element array.
  CUTLASS_HOST_DEVICE
  Array<T, 1> operator()(Array<T, N> const &in) const {

    Reduce< plus<T>, T > add;

    // Zero-initialize the single-element accumulator, then fold every
    // input element into it.
    Array<T, 1> sum;
    sum.clear();

    CUTLASS_PRAGMA_UNROLL
    for (int idx = 0; idx < N; ++idx) {
      sum[0] = add(sum[0], in[idx]);
    }

    return sum;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specializations of Reduce for Array<half_t, N>
template <int N>
struct Reduce < plus<half_t>, Array<half_t, N> > {

  /// Sums all N half-precision elements of 'input', returning the total in a
  /// one-element array. On device code compiled for SM60 or newer, the bulk of
  /// the reduction uses paired __half2 adds (two elements per instruction);
  /// otherwise it falls back to a scalar accumulation loop.
  CUTLASS_HOST_DEVICE
  Array<half_t, 1> operator()(Array<half_t, N> const &input) {

    Array<half_t, 1> result;

    // If there is only 1 element - there is nothing to reduce
    if( N ==1 ){
      result[0] = input.front();
    } else {

      #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
        __half result_d;
        // View the same storage as single halves and as half2 pairs so the
        // main loop can add two elements at a time.
        Array<half_t, 1> const *in_ptr_half = reinterpret_cast<Array<half_t, 1> const *>(&input);
        Array<half_t, 2> const *in_ptr_half2 = reinterpret_cast<Array<half_t, 2> const *>(&input);
        __half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2);

        // Set initial result = first half2, in case N==2
        __half2 tmp_result = x_in_half2[0];

        // Pairwise accumulate the remaining N/2 - 1 half2 chunks.
        CUTLASS_PRAGMA_UNROLL
        for (int i = 1; i < N/2; ++i) {

          tmp_result = __hadd2(x_in_half2[i], tmp_result);

        }

        // Collapse the two lanes of the half2 accumulator into one scalar.
        result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result));

        // One final step is needed for odd "N" (to add the (N-1)th element)
        if( N%2 ){

          __half last_element;
          // Copy the trailing element out, then reinterpret it as __half for
          // the final scalar add.
          Array<half_t, 1> tmp_last;
          Array<half_t, 1> *tmp_last_ptr = &tmp_last;
          tmp_last_ptr[0] = in_ptr_half[N-1];
          last_element = reinterpret_cast<__half const &>(tmp_last);

          result_d = __hadd(result_d, last_element);

        }

        // Reinterpret the __half sum back into the Array<half_t, 1> result.
        Array<half_t, 1> *result_ptr = &result;
        *result_ptr = reinterpret_cast<Array<half_t, 1> &>(result_d);

      #else
        // Host / pre-SM60 fallback: plain scalar accumulation.
        Reduce< plus<half_t>, half_t > scalar_reduce;

        result.clear();

        CUTLASS_PRAGMA_UNROLL
        for (auto i = 0; i < N; ++i) {

          result[0] = scalar_reduce(result[0], input[i]);

        }

      #endif
    }

    return result;

  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specializations of Reduce for AlignedArray<half_t, N>
template <int N>
struct Reduce < plus<half_t>, AlignedArray<half_t, N> > {

  /// Sums all N half-precision elements of an AlignedArray 'input', returning
  /// the total in a one-element array. Mirrors the Array<half_t, N>
  /// specialization: on device code compiled for SM60 or newer, the bulk of
  /// the reduction uses paired __half2 adds; otherwise a scalar loop is used.
  CUTLASS_HOST_DEVICE
  Array<half_t, 1> operator()(AlignedArray<half_t, N> const &input) {

    Array<half_t, 1> result;

    // If there is only 1 element - there is nothing to reduce
    if( N ==1 ){
      result[0] = input.front();
    } else {

      #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
        __half result_d;
        // View the same storage as single halves and as half2 pairs so the
        // main loop can add two elements at a time.
        AlignedArray<half_t, 1> const *in_ptr_half = reinterpret_cast<AlignedArray<half_t, 1> const *>(&input);
        AlignedArray<half_t, 2> const *in_ptr_half2 = reinterpret_cast<AlignedArray<half_t, 2> const *>(&input);
        __half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2);

        // Set initial result = first half2, in case N==2
        __half2 tmp_result = x_in_half2[0];

        // Pairwise accumulate the remaining N/2 - 1 half2 chunks.
        CUTLASS_PRAGMA_UNROLL
        for (int i = 1; i < N/2; ++i) {

          tmp_result = __hadd2(x_in_half2[i], tmp_result);

        }

        // Collapse the two lanes of the half2 accumulator into one scalar.
        result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result));

        // One final step is needed for odd "N" (to add the (N-1)th element)
        if( N%2 ){

          __half last_element;
          // Copy the trailing element out, then reinterpret it as __half for
          // the final scalar add.
          AlignedArray<half_t, 1> tmp_last;
          AlignedArray<half_t, 1> *tmp_last_ptr = &tmp_last;
          tmp_last_ptr[0] = in_ptr_half[N-1];
          last_element = reinterpret_cast<__half const &>(tmp_last);

          result_d = __hadd(result_d, last_element);

        }

        // Reinterpret the __half sum back into the Array<half_t, 1> result.
        Array<half_t, 1> *result_ptr = &result;
        *result_ptr = reinterpret_cast<Array<half_t, 1> &>(result_d);

      #else
        // Host / pre-SM60 fallback: plain scalar accumulation.
        Reduce< plus<half_t>, half_t > scalar_reduce;

        result.clear();

        CUTLASS_PRAGMA_UNROLL
        for (auto i = 0; i < N; ++i) {

          result[0] = scalar_reduce(result[0], input[i]);

        }

      #endif
    }

    return result;

  }
};
}
}
}
| cutlass/include/cutlass/reduction/thread/reduce.h/0 | {
"file_path": "cutlass/include/cutlass/reduction/thread/reduce.h",
"repo_id": "cutlass",
"token_count": 2815
} | 46 |
[README](../../README.md#documentation) > **CUTLASS 3.0: Building on Windows with Visual Studio**
# Building on Windows with Visual Studio
CUTLASS 3.2 reintroduces support for the Microsoft Visual Studio compiler on Windows.
Users and developers may build either
in Visual Studio's graphical integrated development environment,
or on the command line with `cmake --build`.
# Software prerequisites
1. Windows 10 or 11
2. Visual Studio 2019 version 16.11.27, or Visual Studio 2022
3. CUDA Toolkit (at least 12.2; earlier 12.x versions may work)
4. CMake (at least 3.18)
5. git
6. Python (at least 3.6)
Visual Studio must be installed *before* the CUDA Toolkit.
Otherwise, Visual Studio's build system won't know about CUDA.
# Operating system settings
By default, Windows restricts the maximum file path length (`MAX_PATH`) to 260 characters.
CUTLASS has many files and directory paths that challenge this requirement.
As a result, CUTLASS is unlikely to build with this default setting.
The choice of source and build directories affect path lengths,
so the kinds of errors and whether they occur may depend on this.
Symptoms may vary, from errors when running `cmake`
(e.g., during the "generating library instances" step) to build failures.
CUTLASS recommends changing the maximum file path length setting
and rebooting the computer before attempting to clone or build CUTLASS.
Windows 10 (as of version 1607) and 11 permit changing this setting
by making sure that the following registry key exists,
and that its value is set to 1.
```
Computer\HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem\LongPathsEnabled
```
After changing the registry key's value, reboot the computer first
before attempting to clone or build CUTLASS.
[This Microsoft help article](https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry)
explains different ways to change the registry setting.
# Limitations
Currently, it's possible to build examples and tests.
Building the CUTLASS library (e.g., for profiling) with default settings does not currently work,
because Visual Studio's linker cannot handle more than 65535 symbols in a library.
(The symptom of this issue is a LNK1189 linker error.)
The known way to work around this Visual Studio limitation is to disable building CUTLASS's library,
by setting the CMake option `CUTLASS_ENABLE_LIBRARY` to `OFF`.
Another approach may be to limit the number of kernels in the library
by setting the CMake option `CUTLASS_LIBRARY_KERNELS`
so that CUTLASS tries to put fewer kernels in the library.
# Set up build environment
1. Run "git bash" to get a familiar command-line interface
2. Edit `~/.profile` and set the environment variables as needed to access the CUTLASS repository
3. Clone the CUTLASS repository
4. Create the `build` subdirectory in the CUTLASS clone directory, and run CMake in it,
specifying whatever CMake options are desired, e.g.,
`cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_ENABLE_LIBRARY=OFF`
Alternate approaches may rely on the CMake GUI and/or Windows' native command line.
# Building
A successful CMake run will create a `CUTLASS.sln` Visual Studio "solution" file in the build directory.
One can open this in Visual Studio and build the entire solution or any subset of projects as desired.
It may be necessary to limit maximum build parallelism by setting the appropriate Visual Studio option.
Alternately, one can run `cmake --build . --config Release -j 4` in the build directory.
Replace 4 with the desired maximum build parallelism.
It's important to put the `--build` option before the period that signifies the build directory.
The `--config` option specifies the kind of build;
`--config Release` builds a Release build, while `--config Debug` builds a Debug build.
Unlike with CMake's Makefile or Ninja generators,
`CMAKE_BUILD_TYPE` has no effect on the Visual Studio generator,
because the Visual Studio generator creates all build configurations.
| cutlass/media/docs/build/building_in_windows_with_visual_studio.md/0 | {
"file_path": "cutlass/media/docs/build/building_in_windows_with_visual_studio.md",
"repo_id": "cutlass",
"token_count": 1063
} | 47 |
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Functionality")
[README](../../README.md#documentation) > **Functionality**
# Functionality
Note: CUTLASS-3 requires CUDA 11.4 or newer for the target toolkit and SM70 or newer for the target architecture.
Please refer to the [Compatibility](/README.md#Compatibility) section for more details.
- N - Column Major Matrix
- T - Row Major matrix
- {N,T} x {N,T} - All combinations, i.e., NN, NT, TN, TT
- [NHWC](/include/cutlass/layout/tensor.h#L63-206) - 4 dimension tensor used for convolution
- [NCxHWx](/include/cutlass/layout/tensor.h#L290-395) - Interleaved 4 dimension tensor used for convolution
- f - floating point
- s - signed int
- b - bit
- cf - complex float
- bf16 - bfloat16
- tf32 - tfloat32
- Simt - Use Simt CUDA Core MMA
- TensorOp - Use Tensor Core MMA
- SpTensorOp - Use Sparse Tensor Core MMA
- WmmaTensorOp - Use WMMA abstraction to use Tensor Core MMA
## Device-level GEMM
The following tables summarize device-level GEMM kernels in CUTLASS, organized by opcode class, data type, and layout.
Hyperlinks to relevant unit tests demonstrate how specific template instances may be defined.
### CUTLASS 3.x Kernels
|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** |
|-----------------|------------------------|------------------|--------------------------------|------------------------|------------------|
| **TensorOp** | 90a | 12.0+ | `f16 * f16 + { f16, f32 } => { f16, f32 }` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized.cu) |
| **TensorOp** | 90a | 12.0+ | `bf16 * bf16 + { f16, f32 } => { bf16, f32 }`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_tensor_op_f32.cu) |
| **TensorOp** | 90a | 12.0+ | `{f32, tf32} * {f32, tf32} + f32 => f32`| { T } x { N } => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32.cu) |
| **TensorOp** | 90a | 12.0+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32.cu) |
### CUTLASS 2.x Kernels
|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** |
|-----------------|------------------------|------------------|--------------------------------|------------------------|------------------|
| **Simt** | 50+ | 11.4+ | `f32 * f32 + f32 => f32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_sgemm_nt_sm50.cu) |
| **Simt** | 50+ | 11.4+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_dgemm_nt_sm50.cu) |
| **Simt** | 60+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_hgemm_nt_sm50.cu) |
| **Simt** | 61+ | 11.4+ | `s8 * s8 + s32 => {s32,s8}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_igemm_nt_sm50.cu) |
| **WmmaTensorOp** | 70+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f16_sm70.cu) |
| **WmmaTensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f32_sm70.cu) |
| **WmmaTensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s8t_wmma_tensor_op_s32_sm72.cu) |
| **WmmaTensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s4t_wmma_tensor_op_s32_sm75.cu) |
| **WmmaTensorOp** | 75+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_b1t_wmma_tensor_op_s32_sm75.cu) |
| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f16_sm70.cu) |
| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f32_sm70.cu) |
| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm75.cu) |
| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `bf16 * bf16 + f32 => {bf16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_bf16n_bf16t_bf16t_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f32n_f32t_f32t_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `cf64 * cf64 + cf64 => cf64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_sm80.cu), [Gaussian 3m](/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `bf16 * bf16 + f32 => {bf16, f32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sparse_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s8, s32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sparse_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s4, s32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sparse_sm80.cu) |
| **TensorOp** | 90+ | 11.8+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu) |
## Device-level Implicit GEMM convolution
The following table summarizes device-level implicit GEMM convolution kernels in CUTLASS, organized by opcode class, data type, and layout.
Hyperlinks to relevant conv2d fprop unit tests demonstrate how specific template instances may be defined.
One can find and/or create equivalent dgrad and wgrad convolutional operators.
|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** |
|-----------------|------------------------|------------------|--------------------------------|------------------|------------------|
| **Simt** | 50+ | 11.4+ | `f32 * f32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm50.cu) |
| **Simt** | 50+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_cf32nhwc_cf32nhwc_cf32nhwc_simt_f32_sm50.cu) |
| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm70.cu) |
| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8nhwc_s8nhwc_s32nhwc_tensor_op_s32_sm75.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4ncxhwx_s4cxrskx_s4ncxhwx_tensor_op_s32_sm75.cu) |
| **Simt** | 80+ | 11.4+ | `f32 * f32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu) |
| **Simt** | 80+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_cf32nhwc_cf32nhwc_cf32nhwc_simt_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f16 => f16` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8nhwc_s8nhwc_s32nhwc_tensor_op_s32_sm80.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm80.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4ncxhwx_s4cxrskx_s4ncxhwx_tensor_op_s32_sm80.cu) |
## Warp-level Matrix Multiply with Tensor Cores
The following table summarizes supported warp level shapes for each TensorOp instruction.
|**Opcode Class** | **Instruction Shape** | **Warp Shapes** |
|-----------------|-----------------------|--------------------------------------------|
| **TensorOp** | 8-by-8-by-4 | 32x32x4, 32x64x4, 64x32x4, 64x64x4 |
| **TensorOp** | 16-by-8-by-8 | 32x32x8, 32x64x8, 64x32x8, 64x64x8 |
| **TensorOp** | 16-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16, 64x64x16 |
| **TensorOp** | 8-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16, 64x64x16 |
| **TensorOp** | 8-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 |
| **TensorOp** | 16-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 |
| **TensorOp** | 16-by-8-by-64 | 32x32x64, 32x64x64, 64x32x64, 64x64x64 |
| **TensorOp** | 8-by-8-by-128 | 32x32x128, 32x64x128, 64x32x128, 64x64x128 |
| **TensorOp** | 16-by-8-by-256 | 32x32x256, 32x64x256, 64x32x256, 64x64x256 |
| **SpTensorOp** | 16-by-8-by-16 | 64x64x16, 64x32x16, 32x64x16, 32x32x16 |
| **SpTensorOp** | 16-by-8-by-32 | 64x64x32, 64x32x32, 32x64x32, 32x32x32 |
| **SpTensorOp** | 16-by-8-by-64 | 64x64x64, 64x32x64, 32x64x64, 32x32x64 |
| **SpTensorOp** | 16-by-8-by-128 | 64x64x128, 64x32x128, 32x64x128, 32x32x128 |
TensorOp instructions depend on a permuted shared memory layout that can be efficiently
loaded from. The following tables summarize the destination shared memory layout that
can be targeted by matrix operands. It is assumed that each thread loads 128b vectors
from global memory with layout specified in the column "GMEM Layout."
**TensorOp 8-by-8-by-4.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|-----------------------------------------|
| **A** | `half_t` | `ColumnMajor` | `ColumnMajorVoltaTensorOpCongruous<16>` |
| **A** | `half_t` | `RowMajor` | `RowMajorVoltaTensorOpCrosswise<16>` |
| **B** | `half_t` | `ColumnMajor` | `ColumnMajorVoltaTensorOpCrosswise<16>` |
| **B** | `half_t` | `RowMajor` | `RowMajorVoltaTensorOpCongruous<16>` |
| **C** | `half_t` | `RowMajor` | `RowMajor` |
| **C** | `float` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-8.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<16>` |
| **A** | `half_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16>` |
| **B** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16>` |
| **B** | `half_t` | `RowMajor` | `RowMajorTensorOpCongruous<16>` |
| **C** | `half_t` | `RowMajor` | `RowMajor` |
| **C** | `float` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-8 (TF32).**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<32>` |
| **A** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCrosswise<32>` |
| **B** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<32>` |
| **B** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCongruous<32>` |
| **C** | `float` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-16.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `half_t`, `bfloat16_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<16>` |
| **A** | `half_t`, `bfloat16_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16>` |
| **B** | `half_t`, `bfloat16_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16>` |
| **B** | `half_t`, `bfloat16_t` | `RowMajor` | `RowMajorTensorOpCongruous<16>` |
| **C** | `half_t` | `RowMajor` | `RowMajor` |
| **C** | `float` | `RowMajor` | `RowMajor` |
**TensorOp 8-by-8-by-4 (double precision).**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `double` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<64>` |
| **A** | `double` | `RowMajor` | `RowMajorTensorOpCrosswise<64>` |
| **B** | `double` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<64>` |
| **B** | `double` | `RowMajor` | `RowMajorTensorOpCongruous<64>` |
| **C** | `double` | `RowMajor` | `RowMajor` |
**TensorOp 8-by-8-by-16.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8>` |
| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<8>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-32.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8>` |
| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<8>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**TensorOp 8-by-8-by-32.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` |
| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-64.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` |
| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**TensorOp 8-by-8-by-128.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `bin1_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` |
| **B** | `bin1_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**SpTensorOp 16-by-8-by-16.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCrosswise<32, 32>` |
| **B** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<32, 32>`|
| **C** | `float` | `RowMajor` | `RowMajor` |
**SpTensorOp 16-by-8-by-32.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|---------------------------------------|
| **A** | `half_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16, 64>` |
| **B** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16, 64>`|
| **C** | `float` | `RowMajor` | `RowMajor` |
**SpTensorOp 16-by-8-by-64.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|---------------------------------------|
| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8, 128>` |
| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<8, 128>`|
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**SpTensorOp 16-by-8-by-128.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4, 256>` |
| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<4, 256>`|
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
## Warp-level Matrix Multiply with CUDA WMMA API
The following table summarizes supported warp level shapes for each WmmaTensorOp instruction.
|**Opcode Class** | **Instruction Shape** | **Warp Shapes** |
|---------------------|-----------------------|--------------------------------------------|
| **WmmaTensorOp** | 16-by-16-by-16 | 32x32x16, 32x64x16, 64x32x16 |
| **WmmaTensorOp** | 8-by-32-by-16 | 32x32x16, 32x64x16, 64x32x16 |
| **WmmaTensorOp** | 32-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16 |
| **WmmaTensorOp** | 8-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 |
| **WmmaTensorOp** | 8-by-8-by-128 | 32x32x128, 32x64x128, 64x32x128, 64x64x128 |
CUDA exposes warp-level matrix operations in the CUDA C++ WMMA API. The CUDA C++ WMMA API exposes Tensor Cores via a set of functions and types in the `nvcuda::wmma` namespace. The functions and types in `nvcuda::wmma` provide target-independent APIs and implement architecture-specific tensor operation using TensorOp instruction underneath. CUTLASS exposes WMMA API through WmmaTensorOp. The WmmaTensorOp supports canonical shared memory layouts. The following table summarizes the destination shared memory layout that can be targeted by matrix operands. The WMMA API expects that matrices in shared memory loaded by `nvcuda::wmma::load_matrix_sync()` satisfy 128 bit alignment.
**WmmaTensorOp (all matrix sizes and data types).**
|**Operand** | **GMEM Layout** | **SMEM Layout** |
|------------|----------------------------|------------------------------|
| **A** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` |
| **B** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` |
| **C** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` |
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| cutlass/media/docs/functionality.md/0 | {
"file_path": "cutlass/media/docs/functionality.md",
"repo_id": "cutlass",
"token_count": 13595
} | 48 |
![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "Complete CUDA GEMM decomposition")
# Python packages associated with CUTLASS
This directory contains Python packages that are associated with CUTLASS:
* `cutlass`: the CUTLASS Python interface, which enables one to compile and run CUTLASS kernels from within Python
* `cutlass_library`: utilities used for enumerating and emitting C++ code for CUTLASS kernels
## CUTLASS Python Interface
The CUTLASS Python interface enables one to compile and run CUTLASS operations from within Python.
```python
import cutlass
import numpy as np
plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
A, B, C, D = [np.ones((1024, 1024), dtype=np.float16) for i in range(4)]
plan.run(A, B, C, D)
```
### Overview
The CUTLASS Python interface prioritizes ease of use.
It has the following features that support this goal.
* It presents high-level interfaces for operators, that require only few parameters.
* It selects sensible default configurations for an operator given the parameters that have been specified.
* It enumerates configurations for users that are known to work in a given setting.
* It favors emitting descriptive Python run-time exceptions instead of C++ compile-time errors, where possible.
* It simplifies exporting CUTLASS kernels to framework extensions (e.g., PyTorch CUDA extensions).
#### Non-goals
The CUTLASS Python interface does not intend to:
1. select optimal kernel configurations,
2. act as a fast container for CUTLASS kernels, or
3. act as a Python-to-CUDA-kernel just-in-time (JIT) compilation engine.
Regarding selection of optimal kernel configurations,
the interface favors ease-of-use over maximum configurability.
Thus, its default selections for operator parameters may
not achieve the highest possible performance in all scenarios. Users wishing to achieve the highest performance possible should either
* select parameters by profiling different combinations of them, or
* use a library such as [cuBLAS](https://developer.nvidia.com/cublas)
that contains heuristics for selecting kernels.
Regarding acting as a fast container for CUTLASS kernels:
the interface does not strive to minimize overhead in its Python functions surrounding the running of a kernel.
Those wishing to deploy a CUTLASS kernel should either
* use the C++ emitted by the Python interface directly, or
* use one of the CUTLASS emitters for automatically creating a framework extension for the kernel (e.g., a PyTorch CUDA extension).
Regarding acting as a Python-to-CUDA-kernel JIT compilation engine:
the interface enables use of CUTLASS in Python code.
It can be used by frameworks for JIT compiling
Python to CUDA kernels, but does not set out to be such a framework.
#### Comparison to PyCUTLASS
The CUTLASS Python interface builds atop CUTLASS's [PyCUTLASS](https://github.com/NVIDIA/cutlass/tree/v3.0.0/tools/library/scripts/pycutlass) library. PyCUTLASS enables
one to declare, compile, and run GEMMs, convolutions, and grouped GEMM operators with nearly the same configuration
space as CUTLASS's C++ interface. While this flexibility enables one to achieve similar levels of functionality
as available in CUTLASS's C++ interface, it comes with the burden of needing to specify many configuration parameters
to operators -- similar to what one must do in specifying template parameters to operations in CUTLASS's C++ interface.
In contrast, the CUTLASS Python interface aims to provide a higher-level API for declaring, emitting, and compiling
kernels that does not require exhaustively defining template parameters.
### Current functionality
The CUTLASS Python interface currently supports the following operations:
* GEMMs
* GEMMs with fused elementwise epilogues (e.g., ReLU) (for pre-SM90 kernels)
* Stream K swizzling (for pre-SM90 kernels)
* Grouped GEMM (for pre-SM90 kernels)
### Getting started
We recommend using the CUTLASS Python interface via an [NGC PyTorch Docker container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch):
```bash
docker run --gpus all -it --rm nvcr.io/nvidia/pytorch:23.08-py3 -p 8888:8888
```
The CUTLASS Python interface has been tested with CUDA 11.8, 12.0, and 12.1 on Python 3.8 and 3.9.
#### Optional environment variables
Prior to installing the CUTLASS Python interface, one may optionally set the following environment variables:
* `CUTLASS_PATH`: the path to the cloned CUTLASS repository
* `CUDA_INSTALL_PATH`: the path to the installation of CUDA
If these environment variables are not set, the installation process will infer them to be the following:
* `CUTLASS_PATH`: either one directory level above the current directory (i.e., `$(pwd)/..`) if installed locally or in the `source` directory of the location in which `cutlass_library` was installed
* `CUDA_INSTALL_PATH`: the directory holding `/bin/nvcc` for the first version of `nvcc` on `$PATH` (i.e., `which nvcc | awk -F'/bin/nvcc' '{print $1}'`)
**NOTE:** The version of `cuda-python` installed must match the CUDA version in `CUDA_INSTALL_PATH`.
#### Installation
Stable releases of the CUTLASS Python interface are available via the `nvidia-cutlass` PyPI package. Any other packages with the name `cutlass` are not affiliated with NVIDIA CUTLASS.
```bash
pip install nvidia-cutlass
```
The CUTLASS Python interface can also be installed from source by navigating to the root of the CUTLASS directory and performing
```bash
pip install .
```
If you would like to be able to make changes to the CUTLASS Python interface and have them reflected when using the interface, perform:
```bash
pip install -e .
```
To test that your installation was successful, you can run:
```python
import cutlass
import numpy as np
plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
A, B, C, D = [np.ones((128, 128), dtype=np.float16) for i in range(4)]
plan.run(A, B, C, D)
```
### Deep learning framework CUDA extensions
The CUTLASS Python interface provides utilities for exporting a CUTLASS kernel to a deep learning framework CUDA extensions. Currently, PyTorch CUDA extensions can be exported, but a similar pattern could be applied for other frameworks as well. An example of this is provided [here](/examples/python/02_pytorch_extension_grouped_gemm.ipynb).
Currently, the following operations can be exported to a PyTorch CUDA extension:
* GEMM
* Grouped GEMM
* Conv2d
### Examples
Jupyter notebook examples of using the CUTLASS Python interface are located in [examples/python](/examples/python).
To launch these notebooks from this directory, run:
```bash
jupyter-lab ../examples/python
```
### Building documentation
The CUTLASS Python interface uses [Sphinx](https://www.sphinx-doc.org/en/master/) for documentation.
Building the documentation requires additional packages. The following commands will install them.
```bash
sudo apt-get install pandoc
pip install --upgrade Sphinx furo pandoc myst-parser sphinx-copybutton nbsphinx nbsphinx-link sphinx-inline-tabs
```
To build documentation, you must first have installed the CUTLASS Python interface via the
[installation instructions](#installation).
Documentation can then be built via the following commands.
```bash
sphinx-apidoc -o docs_src/source/ cutlass/ cutlass/backend*
cd docs_src
make html
mv _build/* ../docs
```
## CUTLASS library package
[cutlass_library](/python/cutlass_library) contains utilities for enumerating and emitting CUTLASS C++ kernels.
It is used by the CUTLASS CMake system to construct a library of kernels that can be profiled using the CUTLASS profiler.
To install the `cutlass_library` package, run
```bash
python setup_library.py develop --user
```
Alternatively, `cutlass_library` will automatically be installed if you install the CUTLASS Python interface package.
You can also use the [generator.py](/python/cutlass_library/generator.py) script directly without installing the module.
# Copyright
Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| cutlass/python/README.md/0 | {
"file_path": "cutlass/python/README.md",
"repo_id": "cutlass",
"token_count": 2666
} | 49 |
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 4a5275a3b68094ba1d8a4b7e4c459321
tags: 645f666f9bcd5a90fca523b33c5a78b7
| cutlass/python/docs/.buildinfo/0 | {
"file_path": "cutlass/python/docs/.buildinfo",
"repo_id": "cutlass",
"token_count": 85
} | 50 |
/*
* doctools.js
* ~~~~~~~~~~~
*
* Base JavaScript utilities for all Sphinx HTML documentation.
*
* :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
"use strict";
// Tag names of form controls; keyboard shortcuts are suppressed while one of
// these elements has focus so that typing is not hijacked (see
// initOnKeyListeners below).
const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
  "TEXTAREA",
  "INPUT",
  "SELECT",
  "BUTTON",
]);

// Run `callback` once the DOM is ready; runs immediately if parsing has
// already finished by the time this is called.
const _ready = (callback) => {
  if (document.readyState !== "loading") {
    callback();
  } else {
    document.addEventListener("DOMContentLoaded", callback);
  }
};
/**
 * Small JavaScript module for the documentation.
 *
 * Provides i18n helpers (gettext/ngettext), the domain-index collapse
 * togglers, and the global keyboard shortcuts (arrow-key navigation and
 * `/` to focus the search bar).
 */
const Documentation = {
  // Entry point: wires up index togglers and global key handlers.
  init: () => {
    Documentation.initDomainIndexTable();
    Documentation.initOnKeyListeners();
  },

  /**
   * i18n support
   */
  TRANSLATIONS: {},
  // Default plural rule (singular only when n === 1); replaced by the
  // catalog-supplied expression in addTranslations().
  PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
  LOCALE: "unknown",

  // gettext and ngettext don't access this so that the functions
  // can safely bound to a different name (_ = Documentation.gettext)
  gettext: (string) => {
    const translated = Documentation.TRANSLATIONS[string];
    switch (typeof translated) {
      case "undefined":
        return string; // no translation
      case "string":
        return translated; // translation exists
      default:
        return translated[0]; // (singular, plural) translation tuple exists
    }
  },

  ngettext: (singular, plural, n) => {
    const translated = Documentation.TRANSLATIONS[singular];
    if (typeof translated !== "undefined")
      return translated[Documentation.PLURAL_EXPR(n)];
    return n === 1 ? singular : plural;
  },

  // Merges a Sphinx-generated message catalog into the module state.
  addTranslations: (catalog) => {
    Object.assign(Documentation.TRANSLATIONS, catalog.messages);
    // The plural expression arrives as source text; compile it to a function.
    Documentation.PLURAL_EXPR = new Function(
      "n",
      `return (${catalog.plural_expr})`
    );
    Documentation.LOCALE = catalog.locale;
  },

  /**
   * helper function to focus on search bar
   */
  focusSearchBar: () => {
    document.querySelectorAll("input[name=q]")[0]?.focus();
  },

  /**
   * Initialise the domain index toggle buttons
   */
  initDomainIndexTable: () => {
    const toggler = (el) => {
      // Toggler image ids are "toggle-<N>"; the rows of collapsible group N
      // carry class "cg-<N>". Swap the +/- icon and show/hide the rows.
      const idNumber = el.id.substr(7);
      const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
      if (el.src.substr(-9) === "minus.png") {
        el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
        toggledRows.forEach((el) => (el.style.display = "none"));
      } else {
        el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
        toggledRows.forEach((el) => (el.style.display = ""));
      }
    };

    const togglerElements = document.querySelectorAll("img.toggler");
    togglerElements.forEach((el) =>
      el.addEventListener("click", (event) => toggler(event.currentTarget))
    );
    // Togglers are hidden in the markup until JS is known to be available.
    togglerElements.forEach((el) => (el.style.display = ""));
    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
  },

  initOnKeyListeners: () => {
    // only install a listener if it is really needed
    if (
      !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
      !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
    )
      return;

    document.addEventListener("keydown", (event) => {
      // bail for input elements
      if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
      // bail with special keys
      if (event.altKey || event.ctrlKey || event.metaKey) return;

      if (!event.shiftKey) {
        switch (event.key) {
          case "ArrowLeft":
            if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;

            // Follow the <link rel="prev"> emitted by Sphinx, if any.
            const prevLink = document.querySelector('link[rel="prev"]');
            if (prevLink && prevLink.href) {
              window.location.href = prevLink.href;
              event.preventDefault();
            }
            break;
          case "ArrowRight":
            if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;

            const nextLink = document.querySelector('link[rel="next"]');
            if (nextLink && nextLink.href) {
              window.location.href = nextLink.href;
              event.preventDefault();
            }
            break;
        }
      }

      // some keyboard layouts may need Shift to get /
      switch (event.key) {
        case "/":
          if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
          Documentation.focusSearchBar();
          event.preventDefault();
      }
    });
  },
};
// quick alias for translations
const _ = Documentation.gettext;
_ready(Documentation.init);
| cutlass/python/docs/_static/doctools.js/0 | {
"file_path": "cutlass/python/docs/_static/doctools.js",
"repo_id": "cutlass",
"token_count": 1836
} | 51 |
CUTLASS Python API
==================
.. toctree::
:maxdepth: 5
cutlass
| cutlass/python/docs_src/source/modules.rst/0 | {
"file_path": "cutlass/python/docs_src/source/modules.rst",
"repo_id": "cutlass",
"token_count": 30
} | 52 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unit test for compute node in SM90
"""
import logging
import unittest
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from cutlass import swizzle
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
# Silence info-level compiler chatter during the test run.
cutlass.set_log_level(logging.WARNING)


@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTCompute(EVTTestCaseBase):
    """Tests for EVT compute nodes: arithmetic expressions and function calls."""

    def test_arith(self):
        """
        Test arithmetic ops
        """
        # NOTE: the body of this function is consumed by the EVT frontend
        # (presumably parsed/traced to build the epilogue visitor DAG -- see
        # EVTTestBed); the expression itself is the subject under test, so
        # it must not be rewritten for style.
        def evt_arith_compute(accum, C, alpha, beta, gamma):
            D = ((accum + C) * alpha - gamma) / beta
            return D

        for m, n, k, l in self.get_problem_sizes(8):
            # accum/C/D are (l, m, n)-shaped tensors (l is presumably the
            # batch count); alpha/beta/gamma are scalar operands.
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "C": self.fake_tensor(self.element, (l, m, n)),
                "alpha": 1.5,
                "beta": 0.5,
                "gamma": 2.5,
                "D": self.fake_tensor(self.element, (l, m, n))
            }

            launcher = EVTTestBed(self.element, evt_arith_compute, example_inputs)
            input_keys = ["C", "alpha", "beta", "gamma"]
            result_keys = ["D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)

    def test_func_call(self):
        """
        Test function calls (relu, multiply_add) in a compute expression
        """
        def evt_func_call(accum, C, alpha, beta, gamma):
            D = multiply_add(relu(accum + alpha) + C, beta, gamma)
            return D

        for m, n, k, l in self.get_problem_sizes(8):
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "C": self.fake_tensor(self.element, (l, m, n)),
                "alpha": 1.5,
                "beta": 0.5,
                "gamma": 2.5,
                "D": self.fake_tensor(self.element, (l, m, n))
            }

            launcher = EVTTestBed(self.element, evt_func_call, example_inputs)
            input_keys = ["C", "alpha", "beta", "gamma"]
            result_keys = ["D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)


if __name__ == '__main__':
    unittest.main()
| cutlass/test/python/cutlass/evt/evt_compute_sm80_90.py/0 | {
"file_path": "cutlass/test/python/cutlass/evt/evt_compute_sm80_90.py",
"repo_id": "cutlass",
"token_count": 1571
} | 53 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with S8 operands on SM90
"""
from functools import partial
import logging
import unittest
import cutlass
from cutlass.backend.utils.device import device_cc
from utils import LayoutCombination, add_test_gemm
# Silence info-level compiler chatter during the test run.
cutlass.set_log_level(logging.WARNING)

# Target compute capability and operand data type for every test in this file.
cc = 90
dtype = cutlass.DataType.s8


@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
@unittest.skipIf(cutlass.utils.datatypes.torch_type(dtype) is None, f'Version of torch installed does not contain a datatype match for {dtype}')
class GemmS8Sm90(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    pass


# Helpers pre-bound to this test class / s8 element type; each call below
# registers one generated test case on GemmS8Sm90.
add_test_specialized = partial(add_test_gemm, cls=GemmS8Sm90, element=dtype, compilation_modes=['nvcc'])

add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)

# Tests with 1x1x1 clusters
add_test_tensorop(layouts=LayoutCombination.TNN, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=3)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 8], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[64, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 64, 32], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[ 4, 4, 16], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)

# Tests with different cluster shapes
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[2, 2, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 4, 1], threadblock_shape=[128, 128, 128], stages=None)

# Tests with warp-specialized ping-pong schedule
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
                  element_accumulator=cutlass.DataType.s32, cluster_shape=[2, 1, 1], threadblock_shape=[128, 128, 128], stages=None,
                  kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong,
                  epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecialized)

# Tests for SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.s8,
              element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[64, 32, 8], stages=2)

if __name__ == '__main__':
    unittest.main()
| cutlass/test/python/cutlass/gemm/gemm_s8_sm90.py/0 | {
"file_path": "cutlass/test/python/cutlass/gemm/gemm_s8_sm90.py",
"repo_id": "cutlass",
"token_count": 1933
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "conv2d_problems.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "../cache_testbed_output.h"
namespace test {
namespace conv {
namespace device {
template <typename Conv2d>
class TestbedConv2d {
public:
using ElementA = typename Conv2d::ElementA;
using LayoutA = typename Conv2d::LayoutA;
using ElementB = typename Conv2d::ElementB;
using LayoutB = typename Conv2d::LayoutB;
using ElementC = typename Conv2d::ElementC;
using LayoutC = typename Conv2d::LayoutC;
using ElementAccumulator = typename Conv2d::ElementAccumulator;
using ElementCompute = typename Conv2d::ElementCompute;
using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;
/// Reduction kernel
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>;
using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>;
using ReductionStrideIndex = typename ReductionDevice::StrideIndex;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
int tested_problem_count;
public:
  /// Constructs the testbed. The distribution kinds select how tensors A, B,
  /// and C are filled by initialize(); `seed_` stores the base RNG seed.
  TestbedConv2d(
    cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
    cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
    cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
    uint64_t seed_ = 2080
  ):
    init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_), tested_problem_count(0) {
  }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 8) {
scope = 2;
}
else if (bits == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope = 3;
}
else {
scope = 5;
}
}
else {
scope = 8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else {
}
}
  /// Resizes host/device tensors to match `problem_size` and fills A, B, C
  /// with the configured distributions; the D tensors are allocated but not
  /// filled before being synced.
  ///
  /// NOTE(review): the `seed` parameter (default 2019) shadows the member
  /// `seed` (default 2080), so the constructor-provided seed is ignored
  /// unless a caller passes one explicitly -- confirm this is intentional.
  void initialize(
    cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {

    tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
    tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
    tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
    tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
    tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));

    // Distinct seed multipliers decorrelate the A/B/C fill patterns.
    initialize_tensor(tensor_A.host_view(), init_A, seed);
    initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
    initialize_tensor(tensor_C.host_view(), init_C, seed * 39);

    tensor_A.sync_device();
    tensor_B.sync_device();
    tensor_C.sync_device();
    tensor_D_computed.sync_device();
    tensor_D_reference.sync_device();
  }
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
// increment tested problem count run by the testbed
tested_problem_count++;
#if 0 // display conv2d problem size for debugging
std::cout << problem_size << std::endl
<< "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl
<< "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)") << std::endl
<< std::endl;
#endif
initialize(problem_size);
// configure the operator
Conv2d conv2d_op;
typename Conv2d::Arguments conv2d_args(
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_computed.device_ref(),
{alpha, beta},
split_k_mode
);
// find workspace requirement for parallel split-k reduction
size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
// conv2d operation with parallel split-k-mode
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// conv2d output is written to workspace in global memory
conv2d_args.ref_D.reset(reinterpret_cast<ElementC*>(workspace.get()));
// accumulate mma for each cta in k-dimension (1.0 * A * B)
conv2d_args.output_op = {ElementCompute(1), ElementCompute(0)};
// update conv2d operator arguments
status = conv2d_op.update(conv2d_args, workspace.get());
}
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
// run conv2d operator
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run." << std::endl;
return false;
}
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// configure parallel reduction operator
ReductionDevice reduction_op;
typename ReductionDevice::Arguments reduction_args(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(),
problem_size.split_k_slices,
cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size),
{
reinterpret_cast<ElementAccumulator*> (workspace.get()),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
{
tensor_D_computed.device_data(),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
{
tensor_C.device_data(),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
// apply alpha, beta to obtain the following equation alpha * ReduceAdd(A * B) + beta * C
{alpha, beta}
);
status = reduction_op.initialize(reduction_args, nullptr);
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
// run prallel reduction kernel
status = reduction_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " device reference error: "
<< cudaGetErrorString(result);
tensor_D_computed.sync_host();
//
// Reference check - support caching results
//
CachedTestKey cached_test_key = CreateCachedConv2dTestKey<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
ElementCompute
>(
kConvolutionalOperator,
problem_size,
alpha,
beta,
tensor_A.host_view(),
tensor_B.host_view(),
tensor_C.host_view()
);
//
// Look for the cached key
//
bool cached_result_loaded = false;
CachedTestResult cached_test_result;
std::string conv2d_result_cache_name =
std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt";
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
CachedTestResultListing cached_results(conv2d_result_cache_name);
auto cached = cached_results.find(cached_test_key);
cached_result_loaded = cached.first;
if (cached_result_loaded) {
cached_test_result = cached.second;
}
}
if (!cached_result_loaded) {
#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_reference.device_ref(),
alpha,
beta);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_D_reference.sync_host();
#else
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C.host_ref(),
tensor_D_reference.host_ref(),
alpha,
beta);
#endif
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
cached_test_result.D = TensorHash(tensor_D_reference.host_view());
CachedTestResultListing cached_results(conv2d_result_cache_name);
cached_results.append(cached_test_key, cached_test_result);
cached_results.write(conv2d_result_cache_name);
}
} // if (!cached_result_loaded)
uint32_t tensor_D_hash = TensorHash(tensor_D_computed.host_view());
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
passed = (tensor_D_hash == cached_test_result.D);
EXPECT_EQ(tensor_D_hash, cached_test_result.D)
<< "Hash-based comparison failed for key:" << "\n" << cached_test_key << "\n";
}
else {
passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view());
}
EXPECT_TRUE(passed);
std::stringstream ss_problem_size_text;
ss_problem_size_text << "nhwc_"
<< problem_size.N << "x"
<< problem_size.H << "x"
<< problem_size.W << "x"
<< problem_size.C
<< "_krsc_"
<< problem_size.K << "x"
<< problem_size.R << "x"
<< problem_size.S << "x"
<< problem_size.C
<< "_padding_"
<< problem_size.pad_h << "x"
<< problem_size.pad_w
<< "_stride_"
<< problem_size.stride_h << "x"
<< problem_size.stride_w
<< "_dilation_"
<< problem_size.dilation_h << "x"
<< problem_size.dilation_w << "_"
<< (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_");
if (!passed) {
std::stringstream fname;
fname << "error_Conv2d_ImplicitGemm_device_"
<< (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
<< (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDeconv ? "deconv_" : "wgrad_")))
<< ss_problem_size_text.str()
<< Conv2d::ThreadblockShape::kM << "x"
<< Conv2d::ThreadblockShape::kN << "x"
<< Conv2d::ThreadblockShape::kK << "_"
<< Conv2d::WarpShape::kM << "x"
<< Conv2d::WarpShape::kN << "x"
<< Conv2d::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n";
results << "\nD reference (hash: " << cached_test_result.D << ")\n";
if (!cached_result_loaded) {
results
<< tensor_D_reference.host_view() << "\n";
}
results
<< "\nD computed (hash: " << tensor_D_hash << ")\n"
<< tensor_D_computed.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ImplicitGemm>
bool TestSpecificConv2d(
  const Conv2dProblemVector & problem_sizes) {

  // Testbed shared across every problem size in the sweep
  TestbedConv2d<ImplicitGemm> testbed;

  // Each problem size runs twice: once as cross-correlation (the default
  // mode) and once as true convolution. Split-K is fixed to kSerial with a
  // single slice; alpha/beta use the testbed defaults.
  for (auto conv_problem : problem_sizes) {

    // test mode = cross-correlation
    if (!testbed.run(conv_problem, cutlass::conv::SplitKMode::kSerial)) {
      return false;
    }

    // test mode = convolution
    if (!testbed.run(conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
                     cutlass::conv::SplitKMode::kSerial)) {
      return false;
    }
  }

  return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// TestAllConv: Runs cutlass::conv::device::ImplicitGemmConvolution operator and compares it with reference
// TestAllConv runs conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes
// Additionally, each conv2d test can provide conv problem sizes (conv_test_sizes) and blacklist of sizes
// (conv_blacklist_sizes)
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ImplicitGemm>
bool TestAllConv2d(
  const Conv2dProblemVector & conv_test_sizes = Conv2dProblemVector(),
  const Conv2dProblemVector & conv_blacklist_sizes = Conv2dProblemVector()) {

  bool passed = true;

  //
  // Testbed object
  //
  TestbedConv2d<ImplicitGemm> testbed;

  //
  // Get conv problem sizes to run conv operator.
  // The constructor argument is the number of ElementA values per 128-bit
  // access, used by the problem-size generator.
  //
  TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);

  // Vector of conv2d problem sizes to avoid duplicate runs
  Conv2dProblemVector conv_tested_sizes;

  // Vectors of Conv2dProblemVector (lenient/easiest to rigorous problem sizes)
  std::vector<Conv2dProblemVector> problem_vectors = {
    conv_test_sizes,                     // run user specified sizes
    conv_problems.conv2d_default_sizes,  // run default and cudnn bug sizes
    //conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
    conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
  };

  // Flatten 2D problem_vectors into a 1D problem_sizes
  std::vector<cutlass::conv::Conv2dProblemSize> problem_sizes;
  for (auto problem_vector : problem_vectors) {
    for(auto conv_problem : problem_vector) {
      problem_sizes.push_back(conv_problem);
    }
  }

  // If CUTLASS_UNIT_TEST_PROBLEM_COUNT is set reverse the order (rigorous to lenient)
  // so the most rigorous problem sizes run first
  if (CutlassUnitTestProblemCount()) {
    std::reverse(problem_sizes.begin(), problem_sizes.end());
  }

  // Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
  for(auto conv_problem : problem_sizes) {

    // Skip blacklist and avoid duplicate problem sizes
    if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
        std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
      continue;
    }

    //
    // Procedurally disable certain cases
    //

    // CUTLASS DGRAD's *unity* stride specialization only support stride {1, 1}
    if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
         ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
        (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
         cutlass::conv::StrideSupport::kUnity)) {
      if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
        continue;
      }
    }

    // Fixed channels algorithm requires channel count to match access size exactly
    if (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kIteratorAlgorithm ==
        cutlass::conv::IteratorAlgorithm::kFixedChannels) {
      if (conv_problem.C != ImplicitGemm::UnderlyingKernel::Mma::IteratorA::AccessType::kElements) {
        continue;
      }
    }

    // Few channels algorithm requires channel count to be a multiple of the access size
    if (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kIteratorAlgorithm ==
        cutlass::conv::IteratorAlgorithm::kFewChannels) {
      if (conv_problem.C % ImplicitGemm::UnderlyingKernel::Mma::IteratorA::AccessType::kElements) {
        continue;
      }
    }

    // CUTLASS DGRAD's *strided* stride specialization supports all stride {stride_h, stride_w}
    // Although strided dgrad works for all stride combinations, we are only going
    // to run strided dgrad for non-unity strides
    if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
         ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
        (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
         cutlass::conv::StrideSupport::kStrided)) {
      if (((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
        continue;
      }
    }

    //
    // Test
    //

    // push back tested problem size to avoid re-running duplicates
    conv_tested_sizes.push_back(conv_problem);

    // test mode = xcross
    passed = testbed.run(
      conv_problem,
      cutlass::conv::SplitKMode::kSerial);

    if (!passed) {
      return false;
    }

    // test mode = convolution
    passed = testbed.run(
      conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
      cutlass::conv::SplitKMode::kSerial);

    if (!passed) {
      return false;
    }

    // If CUTLASS_UNIT_TEST_PROBLEM_COUNT is set reduce the number of tested problem counts
    if (CutlassUnitTestProblemCount() &&
        testbed.tested_problem_count > CutlassUnitTestProblemCount()) {
      return true;
    }
  }

  // Small-channels convolution can't run the split-k sweep below.
  if (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kIteratorAlgorithm ==
      cutlass::conv::IteratorAlgorithm::kFixedChannels) {
    return true;
  }

  // Small-channels convolution can't run the split-k sweep below.
  if (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kIteratorAlgorithm ==
      cutlass::conv::IteratorAlgorithm::kFewChannels) {
    return true;
  }

  // CUTLASS DGRAD's *strided* specialization does not support split-k mode;
  // run a single strided problem with non-trivial alpha/beta and return.
  if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
       ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
      (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
       cutlass::conv::StrideSupport::kStrided)) {

    passed = testbed.run(
      cutlass::conv::Conv2dProblemSize(
      {1, 56, 56, 8},   // input size (NHWC)
      {8, 1, 1, 8},     // filter size (KRSC)
      {0, 0, 0, 0},     // padding (pad_h, _, pad_w, _)
      {2, 2},           // stride (stride_h, stride_w)
      {1, 1}),          // dilation (dilation_h, dilation_w)
      cutlass::conv::SplitKMode::kSerial,
      cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0),
      cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0));

    if (!passed) {
      return false;
    }

    return passed;
  }

  // Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for
  // a single conv2d problem size. Convolution unit tests take a long time to run so only sweep parameters
  // which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
  // alpha and beta for local testing, but only runs one value for alpha and beta.
  cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
    {1, 17, 11, 288},   // input size (NHWC)
    {160, 3, 3, 288},   // filter size (KRSC)
    {1, 1, 1, 1},       // padding (pad_h, _, pad_w, _)
    {1, 1},             // stride (stride_h, stride_w)
    {1, 1}              // dilation (dilation_h, dilation_w)
  );

  cutlass::conv::SplitKMode split_k_modes [] = {
    cutlass::conv::SplitKMode::kSerial,
    cutlass::conv::SplitKMode::kParallel,
  };

  // 201 slices deliberately exceeds the K extent to exercise the edge case
  // of more slices than work; smaller values cover typical splits.
  int split_k_slices[] = {
    1, 2, 3, 4, 201
  };

  double problem_alpha[] = {
    2.0
  };

  double problem_beta[] = {
    2.0
  };

  for (auto split_k_mode : split_k_modes) {
    for (auto split_k_slice : split_k_slices) {
      for (auto alpha : problem_alpha) {
        for (auto beta : problem_beta) {

          passed = testbed.run(
            conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
            split_k_mode,
            cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha),
            cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta));

          if (!passed) {
            return false;
          }

          // If CUTLASS_UNIT_TEST_PROBLEM_COUNT is set reduce the number of tested problem counts
          if (CutlassUnitTestProblemCount() &&
              testbed.tested_problem_count > CutlassUnitTestProblemCount()) {
            return true;
          }
        }
      }
    }
  }

  return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/test/unit/conv/device/conv2d_testbed.h/0 | {
"file_path": "cutlass/test/unit/conv/device/conv2d_testbed.h",
"repo_id": "cutlass",
"token_count": 10555
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Depthwise Direct Conv testbed
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "../cache_testbed_output.h"
#include "conv2d_problems.h"
#include "cutlass/conv/device/direct_convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/cutlass.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
namespace test {
namespace conv {
namespace device {
/// Testbed for depthwise direct-convolution kernels: allocates and initializes
/// operand tensors, runs the device operator, and verifies the result against
/// a reference implementation (with optional caching of reference hashes).
template <typename Conv2d>
class TestbedDepthwiseDirectConv2d {
public:

  using ElementA = typename Conv2d::ElementA;
  using LayoutA = typename Conv2d::LayoutA;
  using ElementB = typename Conv2d::ElementB;
  using LayoutB = typename Conv2d::LayoutB;
  using ElementC = typename Conv2d::ElementC;
  using LayoutC = typename Conv2d::LayoutC;
  using ElementAccumulator = typename Conv2d::ElementAccumulator;
  using ElementCompute = typename Conv2d::ElementCompute;
  using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;

  static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;

public:

  /// Initialization distributions for operands A, B, and C
  cutlass::Distribution::Kind init_A;
  cutlass::Distribution::Kind init_B;
  cutlass::Distribution::Kind init_C;
  // Base RNG seed for tensor initialization
  uint64_t seed;

  cutlass::HostTensor<ElementA, LayoutA> tensor_A;
  cutlass::HostTensor<ElementB, LayoutB> tensor_B;
  // Filter tensor reordered for the direct-convolution kernel's access pattern
  cutlass::HostTensor<ElementB, LayoutB> tensor_reordered_B;
  cutlass::HostTensor<ElementC, LayoutC> tensor_C;
  cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
  cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;

  // Number of problem sizes this testbed instance has executed
  int tested_problem_count;

public:

  TestbedDepthwiseDirectConv2d(cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
                               cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
                               cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
                               uint64_t seed_ = 2080)
      : init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_), tested_problem_count(0) {}

  /// Helper to initialize a tensor view with the requested distribution.
  /// For Uniform, the value range is narrowed as element width shrinks to
  /// limit accumulation error in the reference comparison.
  template <typename Element, typename Layout>
  void initialize_tensor(cutlass::TensorView<Element, Layout> view,
                         cutlass::Distribution::Kind dist_kind,
                         uint64_t seed) {
    if (dist_kind == cutlass::Distribution::Uniform) {
      int scope;
      int bits = cutlass::sizeof_bits<Element>::value;

      if (bits <= 8) {
        scope = 2;
      } else if (bits == 16) {
        if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
          scope = 3;
        } else {
          scope = 5;
        }
      } else {
        scope = 8;
      }
      cutlass::reference::host::TensorFillRandomUniform(view, seed, scope, -scope, 0);
    } else if (dist_kind == cutlass::Distribution::Identity) {
      cutlass::reference::host::TensorFillIdentity(view);
    } else if (dist_kind == cutlass::Distribution::Gaussian) {
      cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
    } else if (dist_kind == cutlass::Distribution::Sequential) {
      cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
    } else {
      // Unknown distribution: leave the view untouched
    }
  }

  /// Resizes and fills all operand tensors for the given problem size,
  /// then copies them to device memory.
  /// NOTE(review): the `seed` parameter shadows the member `seed`; the fills
  /// below use the parameter (default 2019), not the member — confirm intended.
  void initialize(cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {
    tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
    tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
    tensor_reordered_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
    tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
    tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
    tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));

    initialize_tensor(tensor_A.host_view(), init_A, seed);
    initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
    initialize_tensor(tensor_reordered_B.host_view(), init_B, seed * 17);
    initialize_tensor(tensor_C.host_view(), init_C, seed * 39);

    tensor_A.sync_device();
    tensor_B.sync_device();
    tensor_reordered_B.sync_device();
    tensor_C.sync_device();
    tensor_D_computed.sync_device();
    tensor_D_reference.sync_device();
  }

  /// Returns true if the active CUDA device provides at least `smem_size`
  /// bytes of opt-in shared memory per block.
  bool sufficient(int smem_size) const {
    //
    // Determine SMEM requirements and waive if not satisfied
    //
    cudaDeviceProp properties;
    int device_idx;
    cudaError_t result = cudaGetDevice(&device_idx);

    if (result != cudaSuccess) {
      throw std::runtime_error("cudaGetDevice() API call failed.");
    }

    result = cudaGetDeviceProperties(&properties, device_idx);

    if (result != cudaSuccess) {
      throw std::runtime_error("cudaGetDeviceProperties() failed");
    }

    if (properties.sharedMemPerBlockOptin < static_cast<size_t>(smem_size)) {
      return false;
    }

    return true;
  }

  /// Executes one test: runs the device operator for the given problem size
  /// and verifies against the reference. Returns true on pass or waive,
  /// false on mismatch or launch failure.
  bool run(cutlass::conv::Conv2dProblemSize const &problem_size,
           cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
           ElementCompute alpha = ElementCompute(1.5),
           ElementCompute beta = ElementCompute(1)) {

    // increment tested problem count run by the testbed
    tested_problem_count++;

#if 0 // display conv2d problem size for debugging
    std::cout << problem_size << std::endl
              << "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl
              << "split_k_mode: "
              << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)")
              << std::endl
              << std::endl;
#endif

    initialize(problem_size);

    // configure the operator
    Conv2d conv2d_op;

    typename Conv2d::Arguments conv2d_args(problem_size,
                                           tensor_A.device_ref(),
                                           tensor_B.device_ref(),
                                           tensor_C.device_ref(),
                                           tensor_D_computed.device_ref(),
                                           {alpha, beta},
                                           tensor_reordered_B.device_ref(),
                                           split_k_mode);

    // find workspace requirement for parallel split-k reduction
    size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);

    cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

    // Unsupported problem sizes waive the test (return true) rather than fail it
    cutlass::Status status = conv2d_op.can_implement(problem_size);

    if (status != cutlass::Status::kSuccess) {
      cudaError_t error = cudaGetLastError();
      std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
      return true;
    }

    status = conv2d_op.initialize(conv2d_args, workspace.get());

    if (status != cutlass::Status::kSuccess) {
      cudaError_t error = cudaGetLastError();
      std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
      return true;
    }

    // Waive if the device cannot supply the kernel's shared memory requirement
    if (!sufficient(conv2d_op.get_smem_size())) {
      if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
        std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
      }
      return true;
    }

    // run conv2d operator
    status = conv2d_op();

    EXPECT_TRUE(status == cutlass::Status::kSuccess);
    if (status != cutlass::Status::kSuccess) {
      std::cerr << "Failed to run." << std::endl;
      return false;
    }

    bool passed = false;

    cudaError_t result = cudaDeviceSynchronize();
    EXPECT_EQ(result, cudaSuccess) << " device reference error: " << cudaGetErrorString(result);

    tensor_D_computed.sync_host();

    //
    // Reference check - support caching results
    //
    CachedTestKey cached_test_key =
        CreateCachedConv2dTestKey<ElementA,
                                  LayoutA,
                                  ElementB,
                                  LayoutB,
                                  ElementC,
                                  LayoutC,
                                  ElementAccumulator,
                                  ElementCompute>(kConvolutionalOperator,
                                                  problem_size,
                                                  alpha,
                                                  beta,
                                                  tensor_A.host_view(),
                                                  tensor_B.host_view(),
                                                  tensor_C.host_view());

    //
    // Look for the cached key
    //
    bool cached_result_loaded = false;
    CachedTestResult cached_test_result;

    std::string conv2d_result_cache_name =
        std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt";

    if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
      CachedTestResultListing cached_results(conv2d_result_cache_name);

      auto cached = cached_results.find(cached_test_key);

      cached_result_loaded = cached.first;
      if (cached_result_loaded) {
        cached_test_result = cached.second;
      }
    }

    if (!cached_result_loaded) {

#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
      cutlass::reference::device::Conv2d<ElementA,
                                         LayoutA,
                                         ElementB,
                                         LayoutB,
                                         ElementC,
                                         LayoutC,
                                         ElementCompute,
                                         ElementAccumulator>(kConvolutionalOperator,
                                                             problem_size,
                                                             tensor_A.device_ref(),
                                                             tensor_B.device_ref(),
                                                             tensor_C.device_ref(),
                                                             tensor_D_reference.device_ref(),
                                                             alpha,
                                                             beta);

      // sync host (copy device data to host) for dumping error output in case of mismatches
      tensor_D_reference.sync_host();

#else
      cutlass::reference::host::Conv2d<ElementA,
                                       LayoutA,
                                       ElementB,
                                       LayoutB,
                                       ElementC,
                                       LayoutC,
                                       ElementCompute,
                                       ElementAccumulator>(kConvolutionalOperator,
                                                           problem_size,
                                                           tensor_A.host_ref(),
                                                           tensor_B.host_ref(),
                                                           tensor_C.host_ref(),
                                                           tensor_D_reference.host_ref(),
                                                           alpha,
                                                           beta);
#endif

      if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {

        cached_test_result.D = TensorHash(tensor_D_reference.host_view());

        CachedTestResultListing cached_results(conv2d_result_cache_name);

        cached_results.append(cached_test_key, cached_test_result);
        cached_results.write(conv2d_result_cache_name);
      }
    } // if (!cached_result_loaded)

    uint32_t tensor_D_hash = TensorHash(tensor_D_computed.host_view());

    if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
      // Compare computed output's hash against the cached reference hash
      passed = (tensor_D_hash == cached_test_result.D);

      EXPECT_EQ(tensor_D_hash, cached_test_result.D)
          << "Hash-based comparison failed for key:" << "\n" << cached_test_key << "\n";
    }
    else {
      // Element-wise comparison against the freshly computed reference
      passed = cutlass::reference::host::TensorEquals(
          tensor_D_computed.host_view(),
          tensor_D_reference.host_view());
    }

    EXPECT_TRUE(passed);

    // Build a human-readable description of the problem size for error filenames
    std::stringstream ss_problem_size_text;
    ss_problem_size_text << "nhwc_"
                         << problem_size.N << "x"
                         << problem_size.H << "x"
                         << problem_size.W << "x"
                         << problem_size.C
                         << "_krsc_"
                         << problem_size.K << "x"
                         << problem_size.R << "x"
                         << problem_size.S << "x"
                         << problem_size.C
                         << "_padding_"
                         << problem_size.pad_h << "x"
                         << problem_size.pad_w
                         << "_stride_"
                         << problem_size.stride_h << "x"
                         << problem_size.stride_w
                         << "_dilation_"
                         << problem_size.dilation_h << "x"
                         << problem_size.dilation_w << "_"
                         << (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_");

    if (!passed) {
      // On failure, dump all operand and output tensors to a descriptive file
      std::stringstream fname;

      fname << "error_Conv2d_DirectConv_device_"
            << (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
            << (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
                (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" : "wgrad_"))
            << ss_problem_size_text.str()
            << Conv2d::ThreadblockShape::kM << "x"
            << Conv2d::ThreadblockShape::kN << "x"
            << Conv2d::ThreadblockShape::kK << "_"
            << Conv2d::WarpShape::kM << "x"
            << Conv2d::WarpShape::kN << "x"
            << Conv2d::WarpShape::kK << ".txt";

      std::cout << fname.str() << std::endl;

      std::ofstream results(fname.str());

      results << problem_size << std::endl;

      results
          << "\nA:\n" << tensor_A.host_view() << "\n"
          << "\nB:\n" << tensor_B.host_view() << "\n"
          << "\nC:\n" << tensor_C.host_view() << "\n";

      results << "\nD reference (hash: " << cached_test_result.D << ")\n";

      if (!cached_result_loaded) {
        results
            << tensor_D_reference.host_view() << "\n";
      }

      results
          << "\nD computed (hash: " << tensor_D_hash << ")\n"
          << tensor_D_computed.host_view() << "\n";
    }

    return passed;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename DirectConv>
bool TestSpecificDepthwiseDirectConv2d(const Conv2dProblemVector &problem_sizes) {

  // Testbed shared across the entire sweep
  TestbedDepthwiseDirectConv2d<DirectConv> testbed;

  // Each problem size is exercised twice: as cross-correlation (default) and
  // as true convolution. Split-K is fixed to kSerial with a single slice;
  // alpha/beta use the testbed defaults.
  for (auto conv_problem : problem_sizes) {

    // test mode = cross-correlation
    if (!testbed.run(conv_problem, cutlass::conv::SplitKMode::kSerial)) {
      return false;
    }

    // test mode = convolution
    if (!testbed.run(conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
                     cutlass::conv::SplitKMode::kSerial)) {
      return false;
    }
  }

  return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/test/unit/conv/device/depthwise_conv2d_direct_conv_testbed.h/0 | {
"file_path": "cutlass/test/unit/conv/device/depthwise_conv2d_direct_conv_testbed.h",
"repo_id": "cutlass",
"token_count": 8266
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "../common/cutlass_unit_test.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, basic_rank2) {
  // Dimensions of the rank-2 tensor under test
  int const kRows = 8;
  int const kColumns = 16;

  int backing_store[kRows * kColumns] = {0};

  // An identity layout with stride vector (kColumns, 1) behaves as row-major
  cutlass::TensorRef<
    int,
    cutlass::IdentityTensorLayout<2> > matrix_ref(backing_store, cutlass::make_Coord(kColumns, 1));

  // Write a unique value into every element through the TensorRef
  for (int row = 0; row < kRows; ++row) {
    for (int col = 0; col < kColumns; ++col) {
      matrix_ref.at(cutlass::make_Coord(row, col)) = row * kColumns + col;
    }
  }

  // Verify each value landed at the expected linear offset
  for (int row = 0; row < kRows; ++row) {
    for (int col = 0; col < kColumns; ++col) {
      EXPECT_EQ(backing_store[row * kColumns + col], int(row * kColumns + col));
    }
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, rank2_column_major) {
  int const M = 8;
  int const N = 8;

  // Zero-initialize the backing storage for consistency with the sibling
  // TensorRef tests (a skipped write then shows up as 0 rather than garbage).
  int matrix_data[M * N] = {0};

  // Column-major layout: element (m, n) maps to linear offset m + n * ldm
  cutlass::TensorRef<int, cutlass::layout::ColumnMajor> ref(matrix_data, M);

  // Write a unique value into each element through the TensorRef
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      ref.at(cutlass::make_Coord(m, n)) = m * N + n;
    }
  }

  // Verify the column-major linear mapping
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      EXPECT_EQ(matrix_data[m + n * M], int(m * N + n));
    }
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, rank2_row_major) {
  // Matrix dimensions
  int const kRows = 8;
  int const kColumns = 16;

  int storage[kRows * kColumns] = { 0 };

  // Row-major layout: element (m, n) maps to linear offset m * ldm + n
  cutlass::TensorRef<int, cutlass::layout::RowMajor> ref(storage, kColumns);

  // Populate every element with a unique value through the TensorRef
  for (int r = 0; r < kRows; ++r) {
    for (int c = 0; c < kColumns; ++c) {
      ref.at(cutlass::make_Coord(r, c)) = r * kColumns + c;
    }
  }

  // Verify the row-major linear mapping
  for (int r = 0; r < kRows; ++r) {
    for (int c = 0; c < kColumns; ++c) {
      EXPECT_EQ(storage[r * kColumns + c], int(r * kColumns + c));
    }
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, rank2_contiguous_dynamic) {
  int const M = 8;
  int const N = 16;

  typedef cutlass::TensorRef<int, cutlass::layout::ContiguousMatrix> ContiguousTensorRef;

  // Exercise both runtime-selected layouts supported by ContiguousMatrix
  cutlass::layout::Matrix layouts[] = {
    cutlass::layout::Matrix::kColumnMajor,
    cutlass::layout::Matrix::kRowMajor
  };

  for (int i = 0; i < 2; ++i) {

    int matrix_data[M * N] = { 0 };

    bool const is_column_major = (layouts[i] == cutlass::layout::Matrix::kColumnMajor);

    // Expected linear strides of the row and column dimensions for this layout
    int const row_stride = is_column_major ? 1 : N;
    int const col_stride = is_column_major ? M : 1;

    // Use helper to determine the stride vector from the layout enumerant
    ContiguousTensorRef ref(
      matrix_data,
      cutlass::layout::ContiguousMatrix::packed(cutlass::make_Coord(M, N), layouts[i]));

    // Write a unique value into each element through the TensorRef
    for (int m = 0; m < M; ++m) {
      for (int n = 0; n < N; ++n) {
        ref.at(cutlass::make_Coord(m, n)) = m * N + n;
      }
    }

    // Verify the mapping using the strides computed above
    for (int m = 0; m < M; ++m) {
      for (int n = 0; n < N; ++n) {
        EXPECT_EQ(matrix_data[m * row_stride + n * col_stride], int(m * N + n));
      }
    }
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Exercises ColumnMajorInterleaved<4>: columns are grouped in panels of
// kInterleave, with each row's kInterleave column-neighbors contiguous.
TEST(TensorRef, rank2_column_major_interleaved) {
  int const M = 16;
  int const N = 16;
  int const kInterleave = 4;

  int matrix_data[M * N] = {0};

  // Define the Layout for a column-major interleaved matrix format
  using Layout = cutlass::layout::ColumnMajorInterleaved<kInterleave>;

  // Construct a TensorRef over packed storage.
  cutlass::TensorRef<int, Layout> ref(matrix_data, Layout::packed(cutlass::make_Coord(M, N)));

  // Store each element's column-major linear index through the reference.
  for (int row = 0; row < M; ++row) {
    for (int col = 0; col < N; ++col) {
      ref.at(cutlass::make_Coord(row, col)) = row + col * M;
    }
  }

  // Verify physical placement: element (row, col_base + lane) is expected at
  // offset row * kInterleave + col_base * M + lane for each panel base
  // col_base (a multiple of kInterleave) and lane within the panel.
  for (int row = 0; row < M; ++row) {
    for (int col_base = 0; col_base < N; col_base += kInterleave) {
      for (int lane = 0; lane < kInterleave; ++lane) {
        EXPECT_EQ(matrix_data[row * kInterleave + col_base * M + lane],
                  int(row + (col_base + lane) * M));
      }
    }
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Exercises RowMajorInterleaved<4>: rows are grouped in panels of
// kInterleave, with each column's kInterleave row-neighbors contiguous.
TEST(TensorRef, rank2_row_major_interleaved) {
  int const M = 16;
  int const N = 16;
  int const kInterleave = 4;

  int matrix_data[M * N] = {0};

  // Define the Layout for a row-major interleaved matrix format
  using Layout = cutlass::layout::RowMajorInterleaved<kInterleave>;

  // Construct a TensorRef over packed storage.
  cutlass::TensorRef<int, Layout> ref(matrix_data, Layout::packed(cutlass::make_Coord(M, N)));

  // Store each element's column-major linear index through the reference.
  for (int row = 0; row < M; ++row) {
    for (int col = 0; col < N; ++col) {
      ref.at(cutlass::make_Coord(row, col)) = row + col * M;
    }
  }

  // Verify physical placement: element (row_base + lane, col) is expected at
  // offset row_base * N + col * kInterleave + lane for each panel base
  // row_base (a multiple of kInterleave) and lane within the panel.
  for (int row_base = 0; row_base < M; row_base += kInterleave) {
    for (int col = 0; col < N; ++col) {
      for (int lane = 0; lane < kInterleave; ++lane) {
        EXPECT_EQ(matrix_data[row_base * N + lane + col * kInterleave],
                  int((row_base + lane) + col * M));
      }
    }
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/test/unit/core/tensor_ref.cu/0 | {
"file_path": "cutlass/test/unit/core/tensor_ref.cu",
"repo_id": "cutlass",
"token_count": 2450
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/tensor.hpp>
using namespace cute;
// Applies cute::coalesce to `layout` and checks the result is flat
// (depth <= 1), has the same total size, and maps every linear index to
// the same offset as the original layout.
template <class Layout>
void
test_coalesce(Layout const& layout)
{
  auto coalesced_layout = coalesce(layout);

  CUTLASS_TRACE_HOST(shape (layout) << "  =>  " << shape (coalesced_layout));
  CUTLASS_TRACE_HOST(stride(layout) << "      " << stride(coalesced_layout));

  // Coalescing must flatten the layout without changing its size...
  CUTE_STATIC_ASSERT_V(depth(coalesced_layout) <= Int<1>{});
  ASSERT_EQ(size(coalesced_layout), size(layout));

  // ...and must preserve the index -> offset mapping exactly.
  for (int idx = 0; idx < size(layout); ++idx) {
    EXPECT_EQ(coalesced_layout(idx), layout(idx));
  }
}
// Exercises cute::coalesce across representative layouts: trivial
// single-element layouts, fully static shapes, size-1 modes with static /
// dynamic / zero strides, mixed static-dynamic extents, row-major variants,
// and a fully nested shape/stride layout.
TEST(CuTe_core, Coalesce)
{
  // Trivial single-element layouts (stride 0 and stride 1).
  {
    auto layout = make_layout(Int<1>{}, Int<0>{});
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(Int<1>{}, Int<1>{});
    test_coalesce(layout);
  }
  // Fully static shapes with default (column-major) strides.
  {
    auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}));
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, Int<6>{}));
    test_coalesce(layout);
  }
  // Size-1 middle mode with explicit strides (static, then dynamic).
  {
    auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
                              make_stride(Int<1>{}, Int<6>{}, Int<2>{}));
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
                              make_stride(Int<1>{}, 7, Int<2>{}));
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
                              make_stride(Int<4>{}, 7, Int<8>{}));
    test_coalesce(layout);
  }
  // Mixed static/dynamic extents, default strides.
  {
    auto layout = make_layout(make_shape(2, Int<4>{}, Int<6>{}));
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(Int<2>{}, 4, Int<6>{}));
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, 6));
    test_coalesce(layout);
  }
  // Row-major variants.
  {
    auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}), GenRowMajor{});
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, Int<6>{}), GenRowMajor{});
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(2, Int<4>{}, Int<6>{}), GenRowMajor{});
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(Int<2>{}, 4, Int<6>{}), GenRowMajor{});
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, 6), GenRowMajor{});
    test_coalesce(layout);
  }
  // Row-major with a size-1 mode (static, then dynamic extent).
  {
    auto layout = make_layout(make_shape(Int<2>{}, Int<1>{}, Int<3>{}), GenRowMajor{});
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), GenRowMajor{});
    test_coalesce(layout);
  }
  // Explicit strides with a dynamic or zero stride on the size-1 mode.
  {
    auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), make_stride(Int<2>{}, 4, Int<4>{}));
    test_coalesce(layout);
  }
  {
    auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), make_stride(Int<2>{}, Int<0>{}, Int<4>{}));
    test_coalesce(layout);
  }
  // Fully nested (hierarchical) shape and stride.
  {
    auto layout = Layout<Shape<Shape<_2,_2>,Shape<_2, _2>>,
                         Stride<Stride<_1,_4>,Stride<_8,_32>>>{};
    test_coalesce(layout);
  }
}
| cutlass/test/unit/cute/core/coalesce.cpp/0 | {
"file_path": "cutlass/test/unit/cute/core/coalesce.cpp",
"repo_id": "cutlass",
"token_count": 1874
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include "cutlass/trace.h"
#include "cute/algorithm/tuple_algorithms.hpp"
#include "cute/container/array.hpp"
#include "cute/container/tuple.hpp"
// Verifies cute::reverse on cute::tuple for arities 0 through 5, including
// a mixed static/dynamic tuple where each element's exact type (Int<N> vs
// runtime int) must be preserved in its reversed position.
TEST(CuTe_core, Reverse_Tuple)
{
  using cute::get;

  // Empty tuple reverses to an empty tuple.
  {
    const auto t = cute::make_tuple();
    [[maybe_unused]] auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 0);
  }
  // Single element is unchanged.
  {
    const auto t = cute::make_tuple(123);
    [[maybe_unused]] auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 1);
    EXPECT_EQ(get<0>(t_r), 123);
  }
  // Two elements swap.
  {
    const auto t = cute::make_tuple(123, 456);
    [[maybe_unused]] auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 2);
    EXPECT_EQ(get<0>(t_r), 456);
    EXPECT_EQ(get<1>(t_r), 123);
  }
  // Five runtime elements appear in reversed order.
  {
    const auto t = cute::make_tuple(1, 2, 3, 4, 5);
    auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 5);
    EXPECT_EQ(get<0>(t_r), 5);
    EXPECT_EQ(get<1>(t_r), 4);
    EXPECT_EQ(get<2>(t_r), 3);
    EXPECT_EQ(get<3>(t_r), 2);
    EXPECT_EQ(get<4>(t_r), 1);
  }
  // Mixed static/dynamic: element types are preserved in reversed positions.
  {
    const auto t = cute::make_tuple(cute::Int<1>{}, cute::Int<2>{}, 3);
    auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 3);
    static_assert(cute::is_same_v<cute::remove_cvref_t<decltype(get<0>(t_r))>, int>);
    static_assert(cute::is_same_v<cute::remove_cvref_t<decltype(get<1>(t_r))>, cute::Int<2>>);
    static_assert(cute::is_same_v<cute::remove_cvref_t<decltype(get<2>(t_r))>, cute::Int<1>>);
    EXPECT_EQ(get<0>(t_r), 3);
    EXPECT_EQ(get<1>(t_r), cute::Int<2>{});
    EXPECT_EQ(get<2>(t_r), cute::Int<1>{});
  }
}
// Verifies cute::reverse on cute::array: elements are reversed and the
// result is the same cute::array<T, N> specialization as the input.
TEST(CuTe_core, Reverse_Array)
{
  using cute::get;

  // Empty array reverses to an empty array of the same type.
  {
    const auto t = cute::array<int, 0>{};
    [[maybe_unused]] auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 0);
    using reverse_type = cute::array<int, 0>;
    static_assert(cute::is_same_v<decltype(t_r), reverse_type>);
  }
  // Single element is unchanged.
  {
    const auto t = cute::array<int, 1>{123};
    [[maybe_unused]] auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 1);
    EXPECT_EQ(get<0>(t_r), 123);
    using reverse_type = cute::array<int, 1>;
    static_assert(cute::is_same_v<decltype(t_r), reverse_type>);
  }
  // Two elements swap.
  {
    const auto t = cute::array<int, 2>{123, 456};
    [[maybe_unused]] auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 2);
    EXPECT_EQ(get<0>(t_r), 456);
    EXPECT_EQ(get<1>(t_r), 123);
    using reverse_type = cute::array<int, 2>;
    static_assert(cute::is_same_v<decltype(t_r), reverse_type>);
  }
  // Five float elements (values exactly representable, so EXPECT_EQ is safe).
  {
    const auto t = cute::array<float, 5>{1.125f, 2.25f, 3.5f, 4.625f, 5.75f};
    auto t_r = cute::reverse(t);
    static_assert(cute::tuple_size_v<decltype(t_r)> == 5);
    EXPECT_EQ(get<0>(t_r), 5.75f);
    EXPECT_EQ(get<1>(t_r), 4.625f);
    EXPECT_EQ(get<2>(t_r), 3.5f);
    EXPECT_EQ(get<3>(t_r), 2.25f);
    EXPECT_EQ(get<4>(t_r), 1.125f);
    using reverse_type = cute::array<float, 5>;
    static_assert(cute::is_same_v<decltype(t_r), reverse_type>);
  }
}
| cutlass/test/unit/cute/core/reverse.cpp/0 | {
"file_path": "cutlass/test/unit/cute/core/reverse.cpp",
"repo_id": "cutlass",
"token_count": 2010
} | 59 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cassert>
#include <type_traits>
#include <cute/container/tuple.hpp>
#include <cute/int_tuple.hpp>
// Test helper: wraps a value of type T and yields it back only through an
// implicit conversion operator, exercising conversion paths in cute::tuple.
template <class T>
class ConvertibleTo {
public:
  ConvertibleTo(T value) : value_(value) {}

  operator T() const { return value_; }

private:
  T value_ = 0;
};
// Shorthand for std::integral_constant, used below to build empty (static)
// tuple element types.
template<class Integral, Integral Value>
using IC = std::integral_constant<Integral, Value>;
// MSVC compile-regression test: exercises construction and assignment of
// cute::detail::EBO, cute::detail::TupleBase, and cute::tuple with mixes of
// empty (static integral-constant) and non-empty element types. Historic
// MSVC failure messages are recorded inline as "Symptom" comments.
TEST(CuTe_core_msvc_compilation, TupleAssignment)
{
  CUTLASS_TRACE_HOST("-------------------------------");
  CUTLASS_TRACE_HOST("cute::tuple creation and assignment");
  CUTLASS_TRACE_HOST("-------------------------------");

  using forty_two_type = IC<int, 42>;
  using forty_three_type = IC<size_t, 43>;

  // EBO with an empty (static) element: getv returns the type itself.
  using ebo_s_type = cute::detail::EBO<0, forty_two_type>;
  [[maybe_unused]] ebo_s_type ebo_s;
  static_assert(std::is_same_v<decltype(cute::detail::getv(ebo_s)), forty_two_type>);

  // EBO with a dynamic (stored) element.
  using ebo_d_type = cute::detail::EBO<1, size_t>;
  [[maybe_unused]] ebo_d_type ebo_d(43u);
  assert(ebo_d.t_ == 43u);
  static_assert(std::is_same_v<std::remove_const_t<std::remove_reference_t<decltype(cute::detail::getv(ebo_d))>>, size_t > );
  assert(cute::detail::getv(ebo_d) == 43u);

  // TupleBase: value construction, default construction, and construction
  // through implicit conversions (ConvertibleTo).
  [[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0, 1, 2>, int, forty_two_type, size_t> tb0{
    41, forty_two_type{}, size_t(43u) };
  [[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0, 1, 2>, int, forty_two_type, size_t> tb1;
  int val41 = ConvertibleTo{41};
  assert(val41 == 41);
  size_t val43 = ConvertibleTo{size_t(43u)};
  assert(val43 == size_t{43u});
  [[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0, 1, 2>, int, forty_two_type, size_t> tb2{
    ConvertibleTo{41}, forty_two_type{}, ConvertibleTo{size_t(43u)}};
  [[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0>, int> tb3{ 41 };
  [[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0>, int> tb3a{ 42 };
  tb3 = tb3a;

  using tuple_0d_type = cute::tuple<>;
  using tuple_1d_d_type = cute::tuple<int>;
  using tuple_1d_s_type = cute::tuple<forty_two_type>;
  using tuple_2d_dd_type = cute::tuple<int, size_t>;
  using tuple_2d_ss_type = cute::tuple<forty_two_type, forty_three_type>;

  [[maybe_unused]] tuple_0d_type t0;
  // Symptom: "illegal member initialization: 'TupleBase<int>' is not a base or member"
  [[maybe_unused]] tuple_1d_d_type t1{ 42 };
  [[maybe_unused]] tuple_1d_s_type t2;
  [[maybe_unused]] tuple_1d_d_type t1a{ 43 };
  t1 = t1a;

  [[maybe_unused]] tuple_2d_dd_type t3{ 42, size_t(43u) };
  [[maybe_unused]] tuple_2d_ss_type t4;
  // Assigning an all-static tuple to a dynamic tuple of the same arity.
  t3 = t4;
  [[maybe_unused]] tuple_2d_dd_type t3a{ 44, size_t(45u) };
  // Symptom: "illegal member initialization:
  // 'TupleBase<int, unsigned __int64>' is not a base or member"
  t3 = t3a;
}
// Verifies cute::get<I> on a heterogeneous cute::tuple: each element is
// returned by value with its exact stored type (no conversion applied).
TEST(CuTe_core_msvc_compilation, TupleGetSingleInteger)
{
  CUTLASS_TRACE_HOST("-------------------------------");
  CUTLASS_TRACE_HOST("cute::get<I> on cute::tuple for single integer I");
  CUTLASS_TRACE_HOST("-------------------------------");

  cute::tuple<int, ConvertibleTo<size_t>, IC<int, 43>> t0{ 41, size_t(42u), IC<int, 43>{} };

  [[maybe_unused]] auto t0_0 = cute::get<0>(t0);
  static_assert(std::is_same_v<decltype(t0_0), int>);
  assert(t0_0 == 41);

  [[maybe_unused]] auto t0_1 = cute::get<1>(t0);
  static_assert(std::is_same_v<decltype(t0_1), ConvertibleTo<size_t>>);

  [[maybe_unused]] auto t0_2 = cute::get<2>(t0);
  static_assert(std::is_same_v<decltype(t0_2), IC<int, 43>>);
}
// Verifies cute::get<I...> (multi-index, recursive) access on nested tuples.
TEST(CuTe_core_msvc_compilation, TupleGetRecursive)
{
  CUTLASS_TRACE_HOST("-------------------------------");
  CUTLASS_TRACE_HOST("cute::get<I...> on cute::tuple");
  CUTLASS_TRACE_HOST("-------------------------------");

  using inner_tuple_type = cute::tuple<int, ConvertibleTo<size_t>, IC<int, 43>>;
  using outer_tuple_type = cute::tuple<IC<int, 40>, inner_tuple_type, size_t>;

  inner_tuple_type t0_inner{ 41, size_t(42u), IC<int, 43>{} };
  outer_tuple_type t0_outer{ IC<int, 40>{}, t0_inner, size_t(44u) };

  // Single-index get returns each outer element with its exact type.
  [[maybe_unused]] auto t0_outer_0 = cute::get<0>(t0_outer);
  static_assert(std::is_same_v<decltype(t0_outer_0), IC<int, 40>>);
  [[maybe_unused]] auto t0_outer_1 = cute::get<1>(t0_outer);
  static_assert(std::is_same_v<decltype(t0_outer_1), inner_tuple_type>);
  [[maybe_unused]] auto t0_outer_2 = cute::get<2>(t0_outer);
  static_assert(std::is_same_v<decltype(t0_outer_2), size_t>);
  assert(t0_outer_2 == size_t(44u));

  // Leftmost index is innermost in the nested get sequence:
  // get<1, 0>(t) == get<0>(get<1>(t)).
  [[maybe_unused]] auto t0_outer_10 = cute::get<1, 0>(t0_outer);
  static_assert(std::is_same_v<decltype(t0_outer_10), int>);
  assert(t0_outer_10 == 41);
}
| cutlass/test/unit/cute/msvc_compilation/tuple.cpp/0 | {
"file_path": "cutlass/test/unit/cute/msvc_compilation/tuple.cpp",
"repo_id": "cutlass",
"token_count": 2463
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <fstream>
#include <iostream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
/// Testbed for multistage (software-pipelined) GEMM kernels: fills operands
/// on the host, launches the device GEMM, and verifies the output against a
/// host reference implementation. Tests are waived (treated as passing) when
/// the device lacks sufficient shared memory or rejects the configuration.
template <typename Gemm>
struct MultistageTestbed {

  using ElementA = typename Gemm::ElementA;
  using ElementB = typename Gemm::ElementB;
  using ElementC = typename Gemm::ElementC;
  using ElementAccumulator = typename Gemm::ElementAccumulator;
  using ElementCompute =
      typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;

  /// Initialization
  cutlass::Distribution::Kind init_A;  // distribution used to fill tensor A
  cutlass::Distribution::Kind init_B;  // distribution used to fill tensor B
  cutlass::Distribution::Kind init_C;  // distribution used to fill tensor C
  uint64_t seed;                       // base PRNG seed (offset per tensor)

  //
  // Methods
  //

  MultistageTestbed(
      cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
      cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
      cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
      uint64_t seed_ = 2080)
      : init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) {}

  /// Helper to initialize a tensor view
  template <typename Element, typename Layout>
  bool initialize_tensor(cutlass::TensorView<Element, Layout> view,
                         cutlass::Distribution::Kind dist_kind, uint64_t seed) {
    if (dist_kind == cutlass::Distribution::Uniform) {
      // Narrower value range for 8-bit element types.
      int scope = (cutlass::sizeof_bits<Element>::value == 8) ? 2 : 8;
      cutlass::reference::host::TensorFillRandomUniform(view, seed, scope,
                                                        -scope, 0);
    } else if (dist_kind == cutlass::Distribution::Gaussian) {
      cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, -1);
    } else if (dist_kind == cutlass::Distribution::Identity) {
      cutlass::reference::host::TensorFillIdentity(view);
    } else if (dist_kind == cutlass::Distribution::Sequential) {
      cutlass::reference::host::BlockFillSequential(view.data(),
                                                    view.capacity());
    } else {
      EXPECT_TRUE(false) << "Not implemented";
      return false;
    }
    return true;
  }

  /// Waives test if CUDA device is insufficient
  bool sufficient() const {
    //
    // Determine SMEM requirements and waive if not satisfied
    //
    size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);

    cudaDeviceProp properties;
    int device_idx;
    cudaError_t result = cudaGetDevice(&device_idx);

    if (result != cudaSuccess) {
      throw std::runtime_error("cudaGetDevice() API call failed.");
    }

    result = cudaGetDeviceProperties(&properties, device_idx);

    if (result != cudaSuccess) {
      throw std::runtime_error("cudaGetDeviceProperties() failed");
    }

    // The device must support at least the kernel's opt-in shared memory
    // requirement per block.
    if (properties.sharedMemPerBlockOptin < smem_size) {
      return false;
    }

    return true;
  }

  /// Executes one test. Returns true on pass OR when the test is waived
  /// (insufficient device / unsupported configuration).
  bool run(cutlass::gemm::GemmCoord problem_size,
           ElementCompute alpha = ElementCompute(1),
           ElementCompute beta = ElementCompute(0)) {

    // Waives test if CUDA device is insufficient
    if (!sufficient()) {
      return true;
    }

    //
    // Allocate the GEMM workspace
    //

    cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA>
        tensor_A(problem_size.mk());
    cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB>
        tensor_B(problem_size.kn());
    cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC>
        tensor_C(problem_size.mn());
    cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC>
        tensor_D(problem_size.mn());
    cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC>
        reference_D(problem_size.mn(), false);

    EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
    EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
    EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));

    // Seed the reference output with C so beta * C is accounted for.
    cutlass::reference::host::TensorCopy(reference_D.host_view(),
                                         tensor_C.host_view());

    tensor_A.sync_device();
    tensor_B.sync_device();
    tensor_C.sync_device();
    tensor_D.sync_device();

    //
    // Initialize the GEMM operator
    //

    typename Gemm::Arguments arguments{
        problem_size, tensor_A.device_ref(), tensor_B.device_ref(),
        tensor_C.device_ref(), tensor_D.device_ref(), {alpha, beta}};

    Gemm gemm_op;

    cutlass::Status status = gemm_op.initialize(arguments);

    // Treat configurations the device cannot run as waived, not failed.
    if (status != cutlass::Status::kSuccess) {
      cudaError_t error = cudaGetLastError();
      std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
      return true;
    }

    //
    // Run the GEMM
    //

    status = gemm_op();

    EXPECT_TRUE(status == cutlass::Status::kSuccess);

    //
    // Verify
    //

    cutlass::reference::host::Gemm<
        typename Gemm::ElementA, typename Gemm::LayoutA,
        typename Gemm::ElementB, typename Gemm::LayoutB,
        typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute,
        ElementAccumulator, typename Gemm::Operator>
        reference_gemm;

    reference_gemm(
        problem_size, alpha, tensor_A.host_ref(), tensor_B.host_ref(), beta,
        reference_D.host_ref(), ElementAccumulator(0));

    tensor_D.sync_host();

    // Guard against trivially-zero outputs masking a broken kernel.
    EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
    EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);

    bool passed = cutlass::reference::host::TensorEquals(
        reference_D.host_view(), tensor_D.host_view());

    EXPECT_TRUE(passed);

    // On mismatch, dump operands and both results to a file for triage.
    if (!passed) {
      std::stringstream fname;

      fname << "error_Gemm_device_" << problem_size.m() << "x"
            << problem_size.n() << "x" << problem_size.k() << "_"
            << Gemm::ThreadblockShape::kM << "x" << Gemm::ThreadblockShape::kN
            << "x" << Gemm::ThreadblockShape::kK << "_" << Gemm::WarpShape::kM
            << "x" << Gemm::WarpShape::kN << "x" << Gemm::WarpShape::kK
            << ".txt";

      std::ofstream file(fname.str());

      file << "problem: " << problem_size << ", alpha: " << alpha
           << ", beta: " << beta << "\n\n";

      file << "A =\n"
           << tensor_A.host_view() << "\nB =\n"
           << tensor_B.host_view() << "\nC =\n"
           << tensor_C.host_view() << "\n\nReference =\n"
           << reference_D.host_view() << "\nComputed =\n"
           << tensor_D.host_view();
    }

    return passed;
  }

  /// Runs a set of problem sizes
  bool run_all() {
    bool passed = true;

    // Extents chosen to exercise alignment boundaries and K both smaller and
    // larger than the full multistage pipeline depth.
    int problem_size_m[] = {16, 528};
    int problem_size_n[] = {16, 528};
    int problem_size_k[] = {Gemm::InstructionShape::kK,
                            Gemm::ThreadblockShape::kK * Gemm::kStages +
                                Gemm::InstructionShape::kK};

    double problem_alpha[] = {1.0};

    // TODO Try non zero beta value after multistaged epilogue is implemented
    double problem_beta[] = {0.0};

    for (int m : problem_size_m) {
      for (int n : problem_size_n) {
        for (int k : problem_size_k) {
          for (double alpha : problem_alpha) {
            for (double beta : problem_beta) {
              passed =
                  run({m, n, k}, ElementCompute(alpha), ElementCompute(beta));
              if (!passed) {
                return false;
              }
            }
          }
        }
      }
    }
    return true;
  }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
////////////////////////////////////////////////////////////////////////////////
| cutlass/test/unit/gemm/device/multistage_testbed.h/0 | {
"file_path": "cutlass/test/unit/gemm/device/multistage_testbed.h",
"repo_id": "cutlass",
"token_count": 3922
} | 61 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for Sm90 f16_f16_f16 with cooperative EVT epilogue
D = alpha * acc + beta * c + aux_load
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x_evt.hpp"
#include "sm90_evt_operations.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
namespace test::gemm::device {
// Builds the EVT (Epilogue Visitor Tree) expression for output D:
//   D = alpha * acc                 when the source C is void (IsCNeed == false)
//   D = beta * C + (alpha * acc)    when C participates (IsCNeed == true)
// Returned by value so callers can recover the tree type via decltype.
template <class ElementCompute, class ElementAccumulator, bool IsCNeed>
static constexpr auto select_evt_d() {
  using namespace cutlass::epilogue::fusion;
  constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
  // Leaf product: alpha * acc.
  using BinaryCompute0 = Sm90EVT<Sm90Compute<
                                  cutlass::multiplies,
                                  ElementCompute,
                                  ElementCompute,
                                  RoundStyle>, // alpha * acc
                                 Sm90ScalarBroadcast<ElementAccumulator>, // alpha
                                 Sm90AccFetch // acc
                                >;
  if constexpr (IsCNeed) {
    // Fused multiply-add: beta * C + (alpha * acc).
    using EVT_D = Sm90EVT<Sm90Compute<cutlass::homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>,
                          Sm90ScalarBroadcast<ElementAccumulator>, // beta
                          Sm90SrcFetch<ElementCompute>, // C
                          BinaryCompute0>;
    return EVT_D{};
  } else {
    return BinaryCompute0{};
  }
}
/// Cross-checks an Sm90AuxStore epilogue node against a direct D store.
///
/// Runs two kernels over a sweep of problem sizes:
///   * Gemm         - stores the epilogue result to D directly (reference),
///   * GemmWithoutD - ElementD is void; the EVT root is an Sm90AuxStore that
///                    writes the same result to an auxiliary buffer.
/// Both kernels share A/B/C inputs; the test passes only if the aux-store
/// buffer bit-matches the reference D for every problem size tried.
///
/// Returns true on success, false on the first mismatch. Kernel launch /
/// initialization failures are reported through gtest EXPECT_* macros.
template <class Gemm, class GemmWithoutD>
bool testEVTAuxStoreWithoutD() {
  using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;

  // Problem-size sweep: the smallest legal size plus sizes chosen to
  // exercise predicated (partial-tile) paths.
  int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB);
  std::vector<int> problem_size_m = {max_alignment, 512 - 3 * max_alignment};
  std::vector<int> problem_size_n = {max_alignment, 512 - 2 * max_alignment};

  if constexpr (std::is_same_v<typename Gemm::GemmKernel::DispatchPolicy::Schedule,
                cutlass::gemm::KernelTmaWarpSpecializedPingpong>) {
    problem_size_m.push_back(768);
    problem_size_n.push_back(768);
  }

  using GemmKernel = typename Gemm::GemmKernel;
  constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages;
  constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{});
  // K sizes: minimum alignment and one spanning more K-tiles than pipeline stages.
  std::vector<int> problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment};

  using ElementA = typename Gemm::ElementA;
  using ElementB = typename Gemm::ElementB;
  using ElementC = typename Gemm::ElementC;
  using ElementD = typename Gemm::ElementD;
  constexpr bool has_c = not cute::is_void_v<ElementC>;

  cutlass::DeviceAllocation<ElementA> A_block;
  cutlass::DeviceAllocation<ElementB> B_block;
  // When C is void, allocate the (unused) C buffer with the D element type.
  cutlass::DeviceAllocation<cute::conditional_t<has_c, ElementC, ElementD>> C_block;
  cutlass::DeviceAllocation<ElementD> D_block;
  cutlass::DeviceAllocation<ElementD> aux_store_D_block;
  cutlass::DeviceAllocation<uint8_t> workspace;

  for (int m : problem_size_m) {
    for (int n : problem_size_n) {
      for (int k : problem_size_k) {
        ProblemShapeType problem_size;
        int l = 1;
        problem_size = ProblemShapeType{m, n, k, l};

        // Run base Gemm to get reference D.
        A_block.reset(m * k);
        B_block.reset(k * n);
        C_block.reset(m * n);
        D_block.reset(m * n);
        aux_store_D_block.reset(m * n);

        Gemm gemm_op_base;

        auto stride_A = cutlass::make_cute_packed_stride(
            typename GemmKernel::StrideA{}, cute::make_shape(m, k, cute::Int<1>{}));
        auto stride_B = cutlass::make_cute_packed_stride(
            typename GemmKernel::StrideB{}, cute::make_shape(n, k, cute::Int<1>{}));
        auto stride_C = cutlass::make_cute_packed_stride(
            typename GemmKernel::StrideC{}, cute::make_shape(m, n, cute::Int<1>{}));
        auto stride_D = cutlass::make_cute_packed_stride(
            typename GemmKernel::StrideD{}, cute::make_shape(m, n, cute::Int<1>{}))
            ;

        auto arguments_base = typename Gemm::Arguments {
          cutlass::gemm::GemmUniversalMode::kGemm,
          problem_size,
          {
            A_block.get(), stride_A,
            B_block.get(), stride_B
          },
          { // Epilogue arguments
            {}, // thread
            has_c ? C_block.get() : nullptr, stride_C,
            D_block.get(), stride_D,
          }, // Epilogue arguments end
          /*hw_info=*/{},
          /*scheduler_args=*/{}
        };

        // Kernel under test: D is void; Sm90AuxStore writes the result to
        // aux_store_D_block, which should match the reference D exactly.
        GemmWithoutD gemm_op;
        auto arguments = typename GemmWithoutD::Arguments{
          cutlass::gemm::GemmUniversalMode::kGemm,
          problem_size,
          {
            A_block.get(), stride_A,
            B_block.get(), stride_B
          },
          { // Epilogue arguments
            {}, // thread
            has_c ? C_block.get() : nullptr, stride_C,
            nullptr, stride_D,
          }, // Epilogue arguments end
          /*hw_info=*/{},
          /*scheduler_args=*/{}
        };

        constexpr float beta [[maybe_unused]] = 1.0;
        constexpr float alpha [[maybe_unused]] = 1.0;

        // The EVT argument trees mirror the node structure built by
        // select_evt_d(); initializer nesting must match the tree shape.
        if constexpr (not has_c) {
          arguments_base.epilogue.thread = {
            // binary op : alpha * acc
            {{alpha}}, // leaf op+args : alpha
            {},        // leaf op+args : acc
            {}         // binary args : multiplies
          };
          arguments.epilogue.thread = {
            // unary op: aux store D
            {
              // binary op : alpha * acc
              {{alpha}}, // leaf op+args : alpha
              {},        // leaf op+args : acc
              {}         // binary args : multiplies
            },
            {aux_store_D_block.get(), stride_D}
          };
        } else {
          arguments_base.epilogue.thread = {
            // ternary op : beta * C + (alpha * acc)
            {{beta}}, // leaf op+args : beta
            {},       // op+args : C
            {
              // binary op : alpha * acc
              {{alpha}}, // leaf op+args : alpha
              {},        // leaf op+args : acc
              {}         // binary args : multiplies
            },          // end binary op
            {}          // ternary args : multiply_add
          };
          arguments.epilogue.thread = {
            // unary op: aux store D
            {
              // ternary op : beta * C + (alpha * acc)
              {{beta}}, // leaf op+args : beta
              {},       // op+args : C
              {
                // binary op : alpha * acc
                {{alpha}}, // leaf op+args : alpha
                {},        // leaf op+args : acc
                {}         // binary args : multiplies
              },          // end binary op
              {}          // ternary args : multiply_add
            },
            {aux_store_D_block.get(), stride_D}
          };
        }

        cutlass::Status status;
        cudaError_t result;

        // Reference kernel: verify every stage succeeds before trusting D_block.
        status = gemm_op_base.can_implement(arguments_base);
        EXPECT_EQ(status, cutlass::Status::kSuccess) << "Error gemm base not supported";
        size_t workspace_size_base = Gemm::get_workspace_size(arguments_base);
        workspace.reset(workspace_size_base);
        status = gemm_op_base.initialize(arguments_base, workspace.get());
        EXPECT_EQ(status, cutlass::Status::kSuccess) << "Error at Base Kernel initialization.";
        status = gemm_op_base.run();
        EXPECT_EQ(status, cutlass::Status::kSuccess) << "Error at Base Kernel run.";
        result = cudaDeviceSynchronize();
        EXPECT_EQ(result, cudaSuccess) << "Error at Base Kernel Sync.";

        // Aux-store kernel.
        size_t workspace_size = GemmWithoutD::get_workspace_size(arguments);
        workspace.reset(workspace_size);
        status = gemm_op.can_implement(arguments);
        EXPECT_EQ(status, cutlass::Status::kSuccess);
        status = gemm_op.initialize(arguments, workspace.get());
        EXPECT_EQ(status, cutlass::Status::kSuccess) << "Error at Kernel initialization.";
        status = gemm_op.run();
        EXPECT_EQ(status, cutlass::Status::kSuccess) << "Error at Kernel run.";
        result = cudaDeviceSynchronize();
        EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync.";

        // Bitwise comparison: both paths compute the identical value, so
        // exact equality is expected.
        bool passed = cutlass::reference::device::BlockCompareEqual(aux_store_D_block.get(), D_block.get(), m * n);
        if (!passed) {
          return false;
        }
      }
    }
  }
  return true;
}
}
// Void-C / void-D kernel whose EVT root is an Sm90AuxStore writing an f16
// row-major aux buffer; its output must bit-match a sibling kernel that
// stores D directly. 256x128x64 tile, 2x2x1 cluster, cooperative schedule.
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_VoidC_VoidD_AuxStoreF16_RowMajor) {
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::RowMajor;
  using TileShape_MNK = Shape<_256,_128,_64>;
  using ClusterShape_MNK = Shape<_2,_2,_1>;
  using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
  using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
  // Derives epilogue tile / stage configuration consumed by the aux-store node.
  using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
    TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
  >;
  using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
    EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t
  >;
  using namespace cutlass::epilogue::fusion;
  constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
  constexpr bool has_c = false;
  // EVT computing D (alpha * acc here, since has_c == false).
  using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
  using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
                                typename AuxStoreDescriptor::Element, RoundStyle,
                                typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
                                typename AuxStoreDescriptor::CopyOpR2S>;
  // Builds either the direct-D kernel (has_d == true) or the void-D kernel whose
  // EVT is wrapped in AuxStore. The lambda parameters deliberately shadow the
  // outer constexpr has_c (they carry the same value as cute::C constants).
  constexpr auto select_kernel = [](auto has_c, auto has_d) {
    using FusionCallbacks =
        cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
    using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        TileShape_MNK, ClusterShape_MNK,
        EpilogueTileType,
        float, float,
        cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
        cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
        EpilogueSchedule,
        FusionCallbacks
      >::CollectiveOp;
    using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        cutlass::half_t, LayoutA, 8,
        cutlass::half_t, LayoutB, 8,
        float,
        TileShape_MNK, ClusterShape_MNK,
        cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
        cutlass::gemm::KernelTmaWarpSpecializedCooperative
      >::CollectiveOp;
    using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
        Shape<int,int,int,int>,
        CollectiveMainloop,
        CollectiveEpilogue>;
    return GemmKernel{};
  };
  // Reference kernel (stores D) vs. kernel under test (void D + AuxStore).
  using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
  using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
  bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
  EXPECT_TRUE(passed);
}
// Same aux-store-vs-direct-D check as the RowMajor variant, but with
// column-major C/D/aux layouts. Void C, 256x128x64 tile, 2x2x1 cluster.
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_VoidC_VoidD_AuxStoreF16_ColumnMajor) {
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::ColumnMajor;
  using TileShape_MNK = Shape<_256,_128,_64>;
  using ClusterShape_MNK = Shape<_2,_2,_1>;
  using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
  using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
  // Derives epilogue tile / stage configuration consumed by the aux-store node.
  using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
    TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
  >;
  using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
    EpilogueDescriptor, cutlass::layout::ColumnMajor, cutlass::half_t
  >;
  using namespace cutlass::epilogue::fusion;
  constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
  constexpr bool has_c = false;
  // EVT computing D (alpha * acc here, since has_c == false).
  using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
  using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
                                typename AuxStoreDescriptor::Element, RoundStyle,
                                typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
                                typename AuxStoreDescriptor::CopyOpR2S>;
  // Builds either the direct-D kernel (has_d == true) or the void-D kernel whose
  // EVT is wrapped in AuxStore. The lambda parameters deliberately shadow the
  // outer constexpr has_c (they carry the same value as cute::C constants).
  constexpr auto select_kernel = [](auto has_c, auto has_d) {
    using FusionCallbacks =
        cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
    using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        TileShape_MNK, ClusterShape_MNK,
        EpilogueTileType,
        float, float,
        cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
        cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
        EpilogueSchedule,
        FusionCallbacks
      >::CollectiveOp;
    using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        cutlass::half_t, LayoutA, 8,
        cutlass::half_t, LayoutB, 8,
        float,
        TileShape_MNK, ClusterShape_MNK,
        cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
        cutlass::gemm::KernelTmaWarpSpecializedCooperative
      >::CollectiveOp;
    using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
        Shape<int,int,int,int>,
        CollectiveMainloop,
        CollectiveEpilogue>;
    return GemmKernel{};
  };
  // Reference kernel (stores D) vs. kernel under test (void D + AuxStore).
  using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
  using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
  bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
  EXPECT_TRUE(passed);
}
// Aux-store-vs-direct-D check with a smaller 128x128x64 tile, void C,
// row-major layouts. (Test name says F32 but the aux element is half_t,
// matching the F16 variants — presumably only the tile shape differs; the
// naming looks stale. TODO confirm.)
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 128x128x64_2x2x1_VoidC_VoidD_AuxStoreF32_RowMajor) {
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::RowMajor;
  using TileShape_MNK = Shape<_128,_128,_64>;
  using ClusterShape_MNK = Shape<_2,_2,_1>;
  using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
  using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
  // Derives epilogue tile / stage configuration consumed by the aux-store node.
  using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
    TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
  >;
  using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
    EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t
  >;
  using namespace cutlass::epilogue::fusion;
  constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
  constexpr bool has_c = false;
  // EVT computing D (alpha * acc here, since has_c == false).
  using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
  using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
                                typename AuxStoreDescriptor::Element, RoundStyle,
                                typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
                                typename AuxStoreDescriptor::CopyOpR2S>;
  // Builds either the direct-D kernel (has_d == true) or the void-D kernel whose
  // EVT is wrapped in AuxStore. The lambda parameters deliberately shadow the
  // outer constexpr has_c (they carry the same value as cute::C constants).
  constexpr auto select_kernel = [](auto has_c, auto has_d) {
    using FusionCallbacks =
        cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
    using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        TileShape_MNK, ClusterShape_MNK,
        EpilogueTileType,
        float, float,
        cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
        cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
        EpilogueSchedule,
        FusionCallbacks
      >::CollectiveOp;
    using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        cutlass::half_t, LayoutA, 8,
        cutlass::half_t, LayoutB, 8,
        float,
        TileShape_MNK, ClusterShape_MNK,
        cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
        cutlass::gemm::KernelTmaWarpSpecializedCooperative
      >::CollectiveOp;
    using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
        Shape<int,int,int,int>,
        CollectiveMainloop,
        CollectiveEpilogue>;
    return GemmKernel{};
  };
  // Reference kernel (stores D) vs. kernel under test (void D + AuxStore).
  using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
  using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
  bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
  EXPECT_TRUE(passed);
}
// Aux-store-vs-direct-D check with a real (non-void) C operand, so the EVT
// computes beta * C + alpha * acc. f16 row-major aux, 256x128x64 tile.
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_WithC_VoidD_AuxStoreF16_RowMajor) {
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::RowMajor;
  using TileShape_MNK = Shape<_256,_128,_64>;
  using ClusterShape_MNK = Shape<_2,_2,_1>;
  using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
  using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
  // Derives epilogue tile / stage configuration consumed by the aux-store node.
  using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
    TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
  >;
  using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
    EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t
  >;
  using namespace cutlass::epilogue::fusion;
  constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
  constexpr bool has_c = true;
  // EVT computing D (beta * C + alpha * acc, since has_c == true).
  using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
  using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
                                typename AuxStoreDescriptor::Element, RoundStyle,
                                typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
                                typename AuxStoreDescriptor::CopyOpR2S>;
  // Builds either the direct-D kernel (has_d == true) or the void-D kernel whose
  // EVT is wrapped in AuxStore. The lambda parameters deliberately shadow the
  // outer constexpr has_c (they carry the same value as cute::C constants).
  constexpr auto select_kernel = [](auto has_c, auto has_d) {
    using FusionCallbacks =
        cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
    using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        TileShape_MNK, ClusterShape_MNK,
        EpilogueTileType,
        float, float,
        cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
        cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
        EpilogueSchedule,
        FusionCallbacks
      >::CollectiveOp;
    using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        cutlass::half_t, LayoutA, 8,
        cutlass::half_t, LayoutB, 8,
        float,
        TileShape_MNK, ClusterShape_MNK,
        cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
        cutlass::gemm::KernelTmaWarpSpecializedCooperative
      >::CollectiveOp;
    using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
        Shape<int,int,int,int>,
        CollectiveMainloop,
        CollectiveEpilogue>;
    return GemmKernel{};
  };
  // Reference kernel (stores D) vs. kernel under test (void D + AuxStore).
  using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
  using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
  bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
  EXPECT_TRUE(passed);
}
// Aux-store-vs-direct-D check with a real C operand (beta * C + alpha * acc)
// and column-major C/D/aux layouts. f16 aux, 256x128x64 tile.
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_WithC_VoidD_AuxStoreF16_ColumnMajor) {
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::ColumnMajor;
  using TileShape_MNK = Shape<_256,_128,_64>;
  using ClusterShape_MNK = Shape<_2,_2,_1>;
  using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
  using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
  // Derives epilogue tile / stage configuration consumed by the aux-store node.
  using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
    TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
  >;
  using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
    EpilogueDescriptor, cutlass::layout::ColumnMajor, cutlass::half_t
  >;
  using namespace cutlass::epilogue::fusion;
  constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
  constexpr bool has_c = true;
  // EVT computing D (beta * C + alpha * acc, since has_c == true).
  using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
  using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
                                typename AuxStoreDescriptor::Element, RoundStyle,
                                typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
                                typename AuxStoreDescriptor::CopyOpR2S>;
  // Builds either the direct-D kernel (has_d == true) or the void-D kernel whose
  // EVT is wrapped in AuxStore. The lambda parameters deliberately shadow the
  // outer constexpr has_c (they carry the same value as cute::C constants).
  constexpr auto select_kernel = [](auto has_c, auto has_d) {
    using FusionCallbacks =
        cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
    using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        TileShape_MNK, ClusterShape_MNK,
        EpilogueTileType,
        float, float,
        cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
        cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
        EpilogueSchedule,
        FusionCallbacks
      >::CollectiveOp;
    using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        cutlass::half_t, LayoutA, 8,
        cutlass::half_t, LayoutB, 8,
        float,
        TileShape_MNK, ClusterShape_MNK,
        cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
        cutlass::gemm::KernelTmaWarpSpecializedCooperative
      >::CollectiveOp;
    using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
        Shape<int,int,int,int>,
        CollectiveMainloop,
        CollectiveEpilogue>;
    return GemmKernel{};
  };
  // Reference kernel (stores D) vs. kernel under test (void D + AuxStore).
  using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
  using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
  bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
  EXPECT_TRUE(passed);
}
// Aux-store-vs-direct-D check with a real C operand and the smaller
// 128x128x64 tile, row-major layouts. (Test name says F32 but the aux
// element is half_t, matching the F16 variants — naming looks stale;
// TODO confirm.)
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 128x128x64_2x2x1_WithC_VoidD_AuxStoreF32_RowMajor) {
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::RowMajor;
  using TileShape_MNK = Shape<_128,_128,_64>;
  using ClusterShape_MNK = Shape<_2,_2,_1>;
  using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
  using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
  // Derives epilogue tile / stage configuration consumed by the aux-store node.
  using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
    TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
  >;
  using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
    EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t
  >;
  using namespace cutlass::epilogue::fusion;
  constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
  constexpr bool has_c = true;
  // EVT computing D (beta * C + alpha * acc, since has_c == true).
  using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
  using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
                                typename AuxStoreDescriptor::Element, RoundStyle,
                                typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
                                typename AuxStoreDescriptor::CopyOpR2S>;
  // Builds either the direct-D kernel (has_d == true) or the void-D kernel whose
  // EVT is wrapped in AuxStore. The lambda parameters deliberately shadow the
  // outer constexpr has_c (they carry the same value as cute::C constants).
  constexpr auto select_kernel = [](auto has_c, auto has_d) {
    using FusionCallbacks =
        cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
    using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        TileShape_MNK, ClusterShape_MNK,
        EpilogueTileType,
        float, float,
        cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
        cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
        EpilogueSchedule,
        FusionCallbacks
      >::CollectiveOp;
    using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
        cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
        cutlass::half_t, LayoutA, 8,
        cutlass::half_t, LayoutB, 8,
        float,
        TileShape_MNK, ClusterShape_MNK,
        cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
        cutlass::gemm::KernelTmaWarpSpecializedCooperative
      >::CollectiveOp;
    using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
        Shape<int,int,int,int>,
        CollectiveMainloop,
        CollectiveEpilogue>;
    return GemmKernel{};
  };
  // Reference kernel (stores D) vs. kernel under test (void D + AuxStore).
  using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
  using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
  bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
  EXPECT_TRUE(passed);
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_aux_store.cu/0 | {
"file_path": "cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_aux_store.cu",
"repo_id": "cutlass",
"token_count": 12184
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
Testbed for sparse operations not to be released for CUDA 11.0 GA. Expected release is 11.1.
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct SparseTestbed {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
static int const kSparse = Gemm::GemmKernel::kSparse;
static int const kMetaSizeInBits = Gemm::GemmKernel::kMetaSizeInBits;
static int const kMaxID2 = Gemm::GemmKernel::kMaxID2;
static int const kElementsPerElementE = Gemm::GemmKernel::kElementsPerElementE;
using ElementE = typename Gemm::GemmKernel::ElementE;
using LayoutE = cutlass::layout::RowMajor;
using ReorderedLayoutE = typename Gemm::GemmKernel::LayoutE;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_E;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A_uncompressed;
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;
cutlass::HostTensor<ElementE, LayoutE> tensor_E;
cutlass::HostTensor<ElementE, ReorderedLayoutE> tensor_E_reordered;
//
// Methods
//
SparseTestbed(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_E_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080)
: init_A(init_A_),
init_B(init_B_),
init_C(init_C_),
init_E(init_E_),
seed(seed_) {}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse));
tensor_A_uncompressed.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
tensor_E.resize(cutlass::make_Coord(
problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
tensor_E_reordered.resize(cutlass::make_Coord(
problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
if (init_E == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_E.host_view(), seed, kMetaSizeInBits);
} else if (init_E == cutlass::Distribution::Identity) {
uint32_t content = (kMaxID2 == 1) ? 0x44444444 : 0x4444;
cutlass::reference::host::TensorFill(tensor_E.host_view(),
(ElementE)(content));
} else {
EXPECT_TRUE(false);
}
cutlass::reorder_meta(tensor_E_reordered.host_ref(), tensor_E.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
tensor_E_reordered.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\nE =\n" << tensor_E.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Verifies the result is a GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::uncompress(tensor_A_uncompressed.host_ref(), tensor_A.host_ref(),
tensor_E.host_ref(), problem_size.m(), problem_size.k());
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC,
ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
problem_size,
alpha,
tensor_A_uncompressed.host_ref(),
tensor_B.host_ref(),
beta,
reference_D.host_ref(),
ElementAccumulator(0)
);
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
  /// Executes one test
  ///
  /// Initializes tensors for the given problem size, runs the sparse GEMM on
  /// the device, and verifies the result against a host reference. Returns
  /// true on success; also returns true when the test is waived because the
  /// device cannot run the kernel.
  bool run(
    cutlass::gemm::GemmCoord problem_size,
    int split_k_slices = 1,
    ElementCompute alpha = ElementCompute(1),
    ElementCompute beta = ElementCompute(0)) {

    // Waive test if insufficient CUDA device
    if (!sufficient()) {
      if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
        std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
      }
      return true;
    }

    this->initialize(problem_size);

    //
    // Initialize the GEMM operator
    //

    typename Gemm::Arguments arguments{
      problem_size,
      tensor_A.device_ref(),
      tensor_B.device_ref(),
      tensor_C.device_ref(),
      tensor_D.device_ref(),
      // Reordered sparse metadata consumed by the device kernel
      tensor_E_reordered.device_ref(),
      {alpha, beta},
      split_k_slices
    };

    Gemm gemm_op;

    // Query and allocate any device workspace the kernel requires
    // (e.g. for split-K reduction).
    size_t workspace_size = Gemm::get_workspace_size(arguments);

    cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

    cutlass::Status status = gemm_op.initialize(arguments, workspace.get());

    // This failure is likely due to insufficient device capabilities. Waive the test.
    if (status != cutlass::Status::kSuccess) {
      return true;
    }

    //
    // Run the GEMM
    //

    status = gemm_op();

    EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);

    //
    // Verify
    //

    bool passed = this->verify(problem_size, alpha, beta);

    if (!passed) {
      std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl;
    }

    return passed;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Sweeps a small problem space (M, N, K, split-K, alpha, beta) and runs the
/// sparse GEMM testbed on every combination. Returns false on the first
/// failing configuration, true if all configurations pass.
template <typename Gemm>
bool TestAllSparseGemm() {

  using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;

  int const kMinimumOperandElementSize =
      std::min(
          int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
          int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));

  // M dimension has to be multiple of 32 (sparse float) or 16 (sparse int)
  // because of the reordering of operand E
  int const kAlignmentM = std::max(((sizeof(typename Gemm::ElementE) == 2) ? 32 : 16),
                                   kMinimumOperandElementSize);

  int const kAlignmentN = 128 / kMinimumOperandElementSize;

  // Problem-space sweep: smallest legal sizes plus larger non-trivial sizes.
  int const problem_sizes_m[] = {kAlignmentM, 512 - 3 * kAlignmentM};
  int const problem_sizes_n[] = {kAlignmentN, 512 - 2 * kAlignmentN};
  int const problem_sizes_k[] = {
      Gemm::ThreadblockShape::kK,
      Gemm::ThreadblockShape::kK * (Gemm::kStages + 1)};

  int const split_k_counts[] = {1, 2, 3};

  double const alpha_values[] = {1};
  double const beta_values[] = {2.0};

  SparseTestbed<Gemm> testbed;

  for (int m : problem_sizes_m) {
    for (int n : problem_sizes_n) {
      for (int k : problem_sizes_k) {
        for (int split_k : split_k_counts) {

          // Split-K requires serial split-K support in the kernel, and there
          // must be at least split_k threadblock tiles along the K dimension.
          bool const skip_split_k =
              (!Gemm::kSplitKSerial && split_k > 1) ||
              (split_k > 1 && k / Gemm::ThreadblockShape::kK < split_k);

          if (skip_split_k) {
            continue;
          }

          for (double alpha : alpha_values) {
            for (double beta : beta_values) {

              bool const passed = testbed.run(
                  cutlass::gemm::GemmCoord(m, n, k),
                  split_k,
                  cutlass::from_real<ElementCompute>(alpha),
                  cutlass::from_real<ElementCompute>(beta));

              if (!passed) {
                return false;
              }
            }
          }
        }
      }
    }
  }

  return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/test/unit/gemm/device/testbed_sparse.h/0 | {
"file_path": "cutlass/test/unit/gemm/device/testbed_sparse.h",
"repo_id": "cutlass",
"token_count": 6258
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit test for the OrderedSequenceBarrier class
*/
#include "../common/cutlass_unit_test.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/cluster_launch.hpp>
#include "cutlass/core_io.h"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "testbed.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/arch/cluster_sm90.hpp"
using namespace cute;
//////////////////// KERNEL /////////////////////////
/// Wraps the shared-memory storage required by one OrderedSequencer instance;
/// the kernel overlays this struct onto its dynamic shared memory.
template<typename OrderedSequencer>
struct SharedStorage
{
  typename OrderedSequencer::SharedStorage storage;
};
// Goal of this kernel is to complete deadlock-free
// Each group of ThreadsPerGroup threads must pass through the two barrier
// stages strictly in group order; the kernel terminating at all is evidence
// that the ordered-sequence protocol makes progress.
template<int Stages, int GroupCount, int ThreadsPerGroup>
__global__ static
void ordered_sequence_device(uint32_t const num_iterations)
{
  extern __shared__ char shared_memory[];

  using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>;
  using SmemStorage = SharedStorage<SequenceBarrier>;

  // Overlay the barrier's storage onto dynamic shared memory.
  SmemStorage& shared_storage = *reinterpret_cast<SmemStorage*>(shared_memory);

  // Contiguous sets of ThreadsPerGroup threads form one ordered group.
  int group_idx = threadIdx.x / ThreadsPerGroup;

  typename SequenceBarrier::Params params;
  params.group_id = group_idx;         // sequence ID
  params.group_size = ThreadsPerGroup; // Number of threads / participants in a group

  SequenceBarrier barrier(shared_storage.storage, params);

  // Ensure All CTAs in Cluster have completed init before issuing commits
  __syncthreads();
  cute::cluster_arrive_relaxed();
  cute::cluster_wait();

  CUTLASS_PRAGMA_NO_UNROLL
  for (int i = 0; i < num_iterations; ++i){

    // Blocks until it is this group's turn in the sequence.
    barrier.wait();
    // STAGE 1 CODE...
    #ifndef NDEBUG
    int thread_idx_in_group = threadIdx.x % ThreadsPerGroup;
    if (thread_idx_in_group == 0) {
      printf("STAGE 0 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x);
    }
    #endif

    // Simulates long running stage
    #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
    __nanosleep(100000);
    #endif

    barrier.arrive();

    barrier.wait();

    // STAGE 2 CODE...
    #ifndef NDEBUG
    if (thread_idx_in_group == 0) {
      printf("STAGE 1 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x);
    }
    #endif

    // Simulates long running stage
    #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
    __nanosleep(100000);
    #endif

    barrier.arrive();
  }

  // To make sure remote SMEM doesn't get destroyed
  cute::cluster_arrive();
  cute::cluster_wait();
}
/////////////////////////////////////////////////////
/// Test harness that launches the ordered-sequence kernel in a single-CTA
/// cluster with GroupCount_ groups of 128 threads and Stages_ barrier stages.
template<uint32_t Stages_, uint32_t GroupCount_>
struct PipelineTest {

  //
  // Data members
  //
  static constexpr uint32_t ThreadsPerGroup = 128;
  static constexpr uint32_t BlockSize = GroupCount_ * ThreadsPerGroup;
  static constexpr uint32_t Stages = Stages_;
  static constexpr uint32_t GroupCount = GroupCount_;
  using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>;
  using SmemStorage = SharedStorage<SequenceBarrier>;

  //
  // Methods
  //

  /// Launches the ordered-sequence kernel and synchronizes.
  ///
  /// \param kNumIters  number of wait/arrive iterations each group performs
  /// \param stream     CUDA stream to launch on (default stream if nullptr)
  /// \return cudaSuccess if configuration, launch, and synchronization succeed
  cudaError_t run(uint32_t const kNumIters,
                  cudaStream_t stream = nullptr) {

    // Single 1x1x1 cluster
    auto cluster_shape = Shape<_1, _1, _1>{};

    //
    // Configure and launch
    //
    int iterations = 1;
    cudaError_t result;

    for (int iter = 0; iter < iterations; ++iter) {

      int smem_size = int(sizeof(SmemStorage));

      // Opt in to the dynamic shared memory the kernel requires.
      result = cudaFuncSetAttribute(
        ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>,
        cudaFuncAttributeMaxDynamicSharedMemorySize,
        smem_size);

      // Previously this result was silently discarded; a failed attribute set
      // would make the subsequent launch fail with a confusing error.
      if (result != cudaSuccess) {
        std::cerr << "Error: cudaFuncSetAttribute() failed" << std::endl;
        return result;
      }

      // Launch a single Cluster, with BlockSize threads per CTA
      dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), size<2>(cluster_shape));
      dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
      dim3 dimBlock(BlockSize,1,1);

      const void* kernel = (const void*)ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>;
      int iters = kNumIters;
      void* kernel_params[] = {reinterpret_cast<void*>(&iters)};
      cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);

    } // profiling loop ends

    result = cudaDeviceSynchronize();

    if (result != cudaSuccess) {
      std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl;
      return result;
    }

    return cudaSuccess;
  }
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED

// Each test fixes the barrier depth (Stages) and the number of ordered
// groups (GroupCount); passing requires the kernel to finish deadlock-free.

// 2 stages, 2 groups: minimal ordered-sequence configuration.
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_2) {
  Options options;
  static constexpr uint32_t GroupCount = 2;
  static constexpr uint32_t Stages = 2;
  using Test = PipelineTest<Stages, GroupCount>;
  Testbed<Test> testbed(options);
  EXPECT_TRUE(testbed.verification());
}

// 2 stages, 3 groups.
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_3) {
  Options options;
  static constexpr uint32_t GroupCount = 3;
  static constexpr uint32_t Stages = 2;
  using Test = PipelineTest<Stages, GroupCount>;
  Testbed<Test> testbed(options);
  EXPECT_TRUE(testbed.verification());
}

// 2 stages, 4 groups.
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_4) {
  Options options;
  static constexpr uint32_t GroupCount = 4;
  static constexpr uint32_t Stages = 2;
  using Test = PipelineTest<Stages, GroupCount>;
  Testbed<Test> testbed(options);
  EXPECT_TRUE(testbed.verification());
}

// 2 stages, 5 groups.
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_5) {
  Options options;
  static constexpr uint32_t GroupCount = 5;
  static constexpr uint32_t Stages = 2;
  using Test = PipelineTest<Stages, GroupCount>;
  Testbed<Test> testbed(options);
  EXPECT_TRUE(testbed.verification());
}

#endif
| cutlass/test/unit/pipeline/sequence_barrier.cu/0 | {
"file_path": "cutlass/test/unit/pipeline/sequence_barrier.cu",
"repo_id": "cutlass",
"token_count": 2644
} | 64 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Layout type identifier
///
/// Interleaved variants encode the K-interleaving factor in their name; the
/// Tensor* values name convolution activation/filter memory layouts.
enum class LayoutTypeID {
  kUnknown,
  kColumnMajor,
  kRowMajor,
  kColumnMajorInterleavedK2,
  kRowMajorInterleavedK2,
  kColumnMajorInterleavedK4,
  kRowMajorInterleavedK4,
  kColumnMajorInterleavedK16,
  kRowMajorInterleavedK16,
  kColumnMajorInterleavedK32,
  kRowMajorInterleavedK32,
  kColumnMajorInterleavedK64,
  kRowMajorInterleavedK64,
  kTensorNCHW,
  kTensorNCDHW,
  kTensorNHWC,
  kTensorNDHWC,
  kTensorNC32HW32,
  kTensorC32RSK32,
  kTensorNC64HW64,
  kTensorC64RSK64,
  kInvalid
};

/// Numeric data type
///
/// Naming: kB = bit, kU = unsigned int, kS = signed int, kF = float,
/// a leading C denotes the complex-valued counterpart; the digits give the
/// bit width (e.g. kFE4M3 / kFE5M2 are 8-bit float formats).
enum class NumericTypeID {
  kUnknown,
  kVoid,
  kB1,
  kU2,
  kU4,
  kU8,
  kU16,
  kU32,
  kU64,
  kS2,
  kS4,
  kS8,
  kS16,
  kS32,
  kS64,
  kFE4M3,
  kFE5M2,
  kF16,
  kBF16,
  kTF32,
  kF32,
  kF64,
  kCF16,
  kCBF16,
  kCF32,
  kCTF32,
  kCF64,
  kCS2,
  kCS4,
  kCS8,
  kCS16,
  kCS32,
  kCS64,
  kCU2,
  kCU4,
  kCU8,
  kCU16,
  kCU32,
  kCU64,
  kInvalid
};

/// Enumerated type describing a transformation on a complex value.
enum class ComplexTransform {
  kNone,
  kConjugate,
  kInvalid
};

/// Providers of an operation implementation (used for verification/profiling)
enum class Provider {
  kNone,
  kCUTLASS,
  kReferenceHost,
  kReferenceDevice,
  kCUBLAS,
  kCUDNN,
  kInvalid
};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Enumeration indicating the kind of operation
enum class OperationKind {
  kGemm,
  kRankK,
  kRank2K,
  kTrmm,
  kSymm,
  kConv2d,
  kConv3d,
  kEqGemm,
  kSparseGemm,
  kReduction,
  kInvalid
};

/// Enumeration indicating whether scalars are in host or device memory
enum class ScalarPointerMode {
  kHost,
  kDevice,
  kInvalid
};

/// Describes how reductions are performed across threadblocks
enum class SplitKMode {
  kNone,
  kSerial,
  kParallel,
  kParallelSerial,
  kInvalid
};

/// Indicates the classification of the math instruction
enum class OpcodeClassID {
  kSimt,
  kTensorOp,
  kWmmaTensorOp,
  kSparseTensorOp,
  kInvalid
};

/// Identifies the inner math operation performed by an MMA instruction
enum class MathOperationID {
  kAdd,
  kMultiplyAdd,
  kMultiplyAddSaturate,
  kMultiplyAddMixedInputUpcast,
  kMultiplyAddFastBF16,
  kMultiplyAddFastF16,
  kMultiplyAddFastF32,
  kMultiplyAddComplex,
  kMultiplyAddComplexFastF32,
  kMultiplyAddGaussianComplex,
  kXorPopc,
  kInvalid
};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Enumeration indicating what kind of GEMM operation to perform
enum class GemmKind {
  kGemm,
  kSparse,
  kUniversal,
  kPlanarComplex,
  kPlanarComplexArray,
  kGrouped,
  kInvalid
};

/// Enumeration indicating what kind of RankK update operation to perform
enum class RankKKind {
  kUniversal,
  kInvalid
};

/// Enumeration indicating what kind of TRMM operation to perform
enum class TrmmKind {
  kUniversal,
  kInvalid
};

/// Enumeration indicating what kind of SYMM/HEMM operation to perform
enum class SymmKind {
  kUniversal,
  kInvalid
};

/// Enumeration indicating what kind of Conv2d operation to perform
enum class ConvKind {
  kUnknown,
  kFprop,
  kDgrad,
  kWgrad,
  kInvalid
};

/// Distinguishes cross-correlation from true convolution (flipped filter)
enum class ConvModeID {
  kCrossCorrelation,
  kConvolution,
  kInvalid
};

// Iterator algorithm enum in order of general performance-efficiency
enum class IteratorAlgorithmID {
  kNone,
  kAnalytic,
  kOptimized,
  kFixedChannels,
  kFewChannels,
  kInvalid
};

/// Identifies the epilogue functor applied to the accumulator
enum class EpilogueKind {
  kUnknown,
  kConversion,
  kLinearCombination,
  kLinearCombinationClamp,
  kLinearCombinationPlanarComplex,
  kLinearCombinationRelu,
  kLinearCombinationSigmoid,
  kInvalid
};

/// Threadblock rasterization order over the output tile grid
enum class RasterOrder {
  kAlongN,
  kAlongM,
  kHeuristic,
  kInvalid
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/tools/library/include/cutlass/library/types.h/0 | {
"file_path": "cutlass/tools/library/include/cutlass/library/types.h",
"repo_id": "cutlass",
"token_count": 1897
} | 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "conv_reference_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Registers host/device reference implementations of 2-D convolution with the
/// operation manifest. make_conv_all registers fprop, dgrad, and wgrad kinds;
/// make_conv_fprop registers fprop only (the integer configurations).
void initialize_conv2d_reference_operations(Manifest &manifest) {

  // F16 operands, F16 accumulation / F16 output
  make_conv_all<
    2,
    cutlass::half_t, cutlass::layout::TensorNHWC,
    cutlass::half_t, cutlass::layout::TensorNHWC,
    cutlass::half_t, cutlass::layout::TensorNHWC,
    cutlass::half_t,
    cutlass::half_t
  >(manifest);

  // F16 operands, F32 accumulation, F16 output
  make_conv_all<
    2,
    cutlass::half_t, cutlass::layout::TensorNHWC,
    cutlass::half_t, cutlass::layout::TensorNHWC,
    cutlass::half_t, cutlass::layout::TensorNHWC,
    float,
    float
  >(manifest);

  // F16 operands, F32 accumulation, F32 output
  make_conv_all<
    2,
    cutlass::half_t, cutlass::layout::TensorNHWC,
    cutlass::half_t, cutlass::layout::TensorNHWC,
    float, cutlass::layout::TensorNHWC,
    float,
    float
  >(manifest);

  // BF16 operands, F32 accumulation, BF16 output
  make_conv_all<
    2,
    cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
    cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
    cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
    float,
    float
  >(manifest);

  // BF16 operands, F32 accumulation, F32 output
  make_conv_all<
    2,
    cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
    cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
    float, cutlass::layout::TensorNHWC,
    float,
    float
  >(manifest);

  // TF32 operands, F32 accumulation, TF32 output
  make_conv_all<
    2,
    cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
    cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
    cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
    float,
    float
  >(manifest);

  // TF32 operands, F32 accumulation, F32 output
  make_conv_all<
    2,
    cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
    cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
    float, cutlass::layout::TensorNHWC,
    float,
    float
  >(manifest);

  // F32 throughout
  make_conv_all<
    2,
    float, cutlass::layout::TensorNHWC,
    float, cutlass::layout::TensorNHWC,
    float, cutlass::layout::TensorNHWC,
    float,
    float
  >(manifest);

  // Complex-valued F32 throughout
  make_conv_all<
    2,
    cutlass::complex<float>, cutlass::layout::TensorNHWC,
    cutlass::complex<float>, cutlass::layout::TensorNHWC,
    cutlass::complex<float>, cutlass::layout::TensorNHWC,
    cutlass::complex<float>,
    cutlass::complex<float>
  >(manifest);

  // S8 operands, S32 accumulation; outputs clamped to the destination type
  make_conv_fprop<
    2,
    int8_t, cutlass::layout::TensorNHWC,
    int8_t, cutlass::layout::TensorNHWC,
    int32_t, cutlass::layout::TensorNHWC,
    int32_t,
    int32_t,
    NumericConverterClamp<int32_t, int32_t>
  >(manifest);

  make_conv_fprop<
    2,
    int8_t, cutlass::layout::TensorNHWC,
    int8_t, cutlass::layout::TensorNHWC,
    int8_t, cutlass::layout::TensorNHWC,
    float,
    int32_t,
    NumericConverterClamp<int8_t, float>
  >(manifest);

  // U8 operands
  make_conv_fprop<
    2,
    uint8_t, cutlass::layout::TensorNHWC,
    uint8_t, cutlass::layout::TensorNHWC,
    uint8_t, cutlass::layout::TensorNHWC,
    float,
    int32_t,
    NumericConverterClamp<uint8_t, float>
  >(manifest);

  make_conv_fprop<
    2,
    uint8_t, cutlass::layout::TensorNHWC,
    uint8_t, cutlass::layout::TensorNHWC,
    int32_t, cutlass::layout::TensorNHWC,
    int32_t,
    int32_t,
    NumericConverterClamp<int32_t, int32_t>
  >(manifest);

  make_conv_fprop<
    2,
    uint8_t, cutlass::layout::TensorNHWC,
    uint8_t, cutlass::layout::TensorNHWC,
    int8_t, cutlass::layout::TensorNHWC,
    float,
    int32_t,
    NumericConverterClamp<int8_t, float>
  >(manifest);

  // S4 operands
  make_conv_fprop<
    2,
    cutlass::int4b_t, cutlass::layout::TensorNHWC,
    cutlass::int4b_t, cutlass::layout::TensorNHWC,
    int32_t, cutlass::layout::TensorNHWC,
    int32_t,
    int32_t,
    NumericConverterClamp<int32_t, int32_t>
  >(manifest);

  make_conv_fprop<
    2,
    cutlass::int4b_t, cutlass::layout::TensorNHWC,
    cutlass::int4b_t, cutlass::layout::TensorNHWC,
    cutlass::int4b_t, cutlass::layout::TensorNHWC,
    float,
    int32_t,
    NumericConverterClamp<cutlass::int4b_t, float>
  >(manifest);

  // U4 operands
  make_conv_fprop<
    2,
    cutlass::uint4b_t, cutlass::layout::TensorNHWC,
    cutlass::uint4b_t, cutlass::layout::TensorNHWC,
    int32_t, cutlass::layout::TensorNHWC,
    int32_t,
    int32_t,
    NumericConverterClamp<int32_t, int32_t>
  >(manifest);

  make_conv_fprop<
    2,
    cutlass::uint4b_t, cutlass::layout::TensorNHWC,
    cutlass::uint4b_t, cutlass::layout::TensorNHWC,
    cutlass::uint4b_t, cutlass::layout::TensorNHWC,
    float,
    int32_t,
    NumericConverterClamp<cutlass::uint4b_t, float>
  >(manifest);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/tools/library/src/reference/conv2d.cu/0 | {
"file_path": "cutlass/tools/library/src/reference/conv2d.cu",
"repo_id": "cutlass",
"token_count": 2557
} | 66 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Provides several functions for filling tensors with data.
*/
#pragma once
#include <string>
#include <vector>
#include <map>
#include <iostream>
#include "cutlass/library/library.h"
#define TRACE(x) { std::cout << __FILE__ << ":" << __LINE__ << " " << x << std::endl; }
namespace cutlass {
namespace profiler {

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Parses an enumerant of type T from its string representation; each enum
/// below provides an explicit specialization.
template <typename T>
T from_string(std::string const &);

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Enumerated type describing how the performance testbench evaluates kernels.
enum class ExecutionMode {
  kProfile,                     ///< regular verification and profiling
  kDryRun,                      ///< no kernels are launched or workspaces allocated; used to assess what operators might be launched
  kEnumerate,                   ///< no kernels launched or workspaces allocated; lists all operation kind and operations
  kTrace,                       ///< executes a single device-side computation with no other kernel launches
  kInvalid
};

/// Converts an ExecutionMode enumerant to a string
char const *to_string(ExecutionMode mode, bool pretty = false);

/// Parses an ExecutionMode enumerant from a string
template <>
ExecutionMode from_string<ExecutionMode>(std::string const &str);

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Library algorithm mode
enum class AlgorithmMode {
  kMatching,                    ///< compare against best matching algorithm
  kBest,                        ///< evaluate all library algorithms and report best
  kDefault,                     ///< use the library's default algorithm option
  kInvalid
};

/// Converts an AlgorithmMode enumerant to a string
char const *to_string(AlgorithmMode mode, bool pretty = false);

/// Parses an AlgorithmMode enumerant from a string
template <>
AlgorithmMode from_string<AlgorithmMode>(std::string const &str);

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Outcome of a performance test
enum class Disposition {
  kPassed,
  kFailed,
  kNotRun,
  kIncorrect,
  kNotVerified,
  kInvalidProblem,
  kNotSupported,
  kInvalid
};

/// Converts a Disposition enumerant to a string
char const *to_string(Disposition disposition, bool pretty = false);

/// Parses a Disposition enumerant from a string
template <>
Disposition from_string<Disposition>(std::string const &str);

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Indicates when to save the problem workspace to disk
enum class SaveWorkspace {
  kNever,
  kIncorrect,
  kAlways,
  kInvalid
};

/// Converts a SaveWorkspace enumerant to a string
char const *to_string(SaveWorkspace save_option, bool pretty = false);

/// Parses a SaveWorkspace enumerant from a string
template <>
SaveWorkspace from_string<SaveWorkspace>(std::string const &str);

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Indicates the type of kernel argument
// ArgumentType can be both ScalarType or NumericType. Thus, enums kScalar and kNumeric
// 1) kScalar: e.g. of a Scalar ArgumentType is u32 is a Scalar type.
//    Its c++ equivalent as "type name = initializer" is "u32 m = 32"
// 2) kNumeric: e.g. of a Numeric ArgumentType is NumericTypeID is a Numeric type.
//    Its c++ equivalent as "type name = initializer" is "NumericTypeID numeric_type = u32"
enum class ArgumentTypeID {
  kScalar,
  kInteger,
  kTensor,
  kBatchedTensor,
  kStructure,
  kEnumerated,
  kInvalid
};

/// Converts an ArgumentTypeID enumerant to a string
char const *to_string(ArgumentTypeID type, bool pretty = false);

/// Parses an ArgumentTypeID enumerant from a string
template <>
ArgumentTypeID from_string<ArgumentTypeID>(std::string const &str);

/////////////////////////////////////////////////////////////////////////////////////////////////

// Profiler typedefs
using ProviderVector = std::vector<library::Provider>;
using DispositionMap = std::map<library::Provider, Disposition>;

/////////////////////////////////////////////////////////////////////////////////////////////////

// Print vector for the report: comma-separated pretty names, no trailing comma
template <typename T>
std::ostream& operator<< (std::ostream& out, const std::vector<T>& v) {
  for (size_t i = 0; i < v.size(); ++i) {
    out << to_string(v[i], true) << (i + 1u != v.size() ? "," : "");
  }
  return out;
}

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace profiler
} // namespace cutlass
| cutlass/tools/profiler/include/cutlass/profiler/enumerated_types.h/0 | {
"file_path": "cutlass/tools/profiler/include/cutlass/profiler/enumerated_types.h",
"repo_id": "cutlass",
"token_count": 1677
} | 67 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Helper functions for mapping CUTLASS concepts to cuBLAS.
*/
#include <stdexcept>
#if CUTLASS_ENABLE_CUBLAS
#include "cutlass/profiler/cublas_helpers.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts a cuBLAS status to cutlass::Status
/// Converts a cuBLAS status to cutlass::Status
Status get_cutlass_status(cublasStatus_t cublas) {
  if (cublas == CUBLAS_STATUS_SUCCESS) {
    return Status::kSuccess;
  }
  if (cublas == CUBLAS_STATUS_INVALID_VALUE) {
    return Status::kErrorInvalidProblem;
  }
  if (cublas == CUBLAS_STATUS_NOT_SUPPORTED) {
    return Status::kErrorNotSupported;
  }
  // Any other cuBLAS error code is reported as an internal error.
  return Status::kErrorInternal;
}
/// Converts a cuBLAS status to cutlass::profiler::Disposition
/// Converts a cuBLAS status to cutlass::profiler::Disposition
Disposition get_cutlass_disposition(cublasStatus_t cublas_status) {
  switch (cublas_status) {
  case CUBLAS_STATUS_INVALID_VALUE:
    return Disposition::kInvalidProblem;
  case CUBLAS_STATUS_NOT_SUPPORTED:
    return Disposition::kNotSupported;
  default:
    // All remaining failure codes are treated as a generic failure.
    return Disposition::kFailed;
  }
}
/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation
/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation.
///
/// Returns true and assigns \p operation when a mapping exists. cuBLAS cannot
/// express a conjugated, non-transposed operand, so column-major + conjugate
/// has no mapping and false is returned.
bool get_cublas_transpose_operation(
  cublasOperation_t &operation,
  library::LayoutTypeID layout,
  library::ComplexTransform transform) {

  if (layout == library::LayoutTypeID::kColumnMajor) {
    // Column-major is cuBLAS's native layout: non-transposed operand.
    if (transform == library::ComplexTransform::kNone) {
      operation = CUBLAS_OP_N;
      return true;
    }
    return false;
  }

  if (layout == library::LayoutTypeID::kRowMajor) {
    // Row-major is expressed as a transpose; a conjugate transform
    // upgrades it to a conjugate-transpose.
    if (transform == library::ComplexTransform::kNone) {
      operation = CUBLAS_OP_T;
      return true;
    }
    if (transform == library::ComplexTransform::kConjugate) {
      operation = CUBLAS_OP_C;
      return true;
    }
    return false;
  }

  // Any other layout (e.g. interleaved) has no cuBLAS equivalent.
  return false;
}
/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration
/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration.
/// Returns true on success; returns false (leaving \p data_type unmodified)
/// for numeric types with no cuBLAS storage equivalent (e.g. TF32, S4, B1).
bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type) {
  switch (element_type) {
  case library::NumericTypeID::kFE4M3:
    // FP8 enumerants only exist in CUDA 11.8 and newer toolkits.
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
    data_type = CUDA_R_8F_E4M3;
    return true;
#endif
    break;
  case library::NumericTypeID::kFE5M2:
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
    data_type = CUDA_R_8F_E5M2;
    return true;
#endif
    break;
  case library::NumericTypeID::kF16:
    data_type = CUDA_R_16F;
    return true;
  case library::NumericTypeID::kBF16:
    data_type = CUDA_R_16BF;
    return true;
  case library::NumericTypeID::kTF32:
    // TF32 is a cuBLAS compute mode, not a storage type: no mapping.
    break;
  case library::NumericTypeID::kF32:
    data_type = CUDA_R_32F;
    return true;
  case library::NumericTypeID::kF64:
    data_type = CUDA_R_64F;
    return true;
  case library::NumericTypeID::kS4:
    break;
  case library::NumericTypeID::kS8:
    data_type = CUDA_R_8I;
    return true;
  case library::NumericTypeID::kS16:
    break;
  case library::NumericTypeID::kS32:
    data_type = CUDA_R_32I;
    return true;
  case library::NumericTypeID::kS64:
    break;
  case library::NumericTypeID::kU4:
    break;
  case library::NumericTypeID::kU8:
    data_type = CUDA_R_8U;
    return true;
  case library::NumericTypeID::kU16:
    break;
  case library::NumericTypeID::kU32:
    data_type = CUDA_R_32U;
    return true;
  case library::NumericTypeID::kU64:
    break;
  case library::NumericTypeID::kB1:
    break;
  case library::NumericTypeID::kCF32:
    // Complex types map to the CUDA_C_* enumerants.
    data_type = CUDA_C_32F;
    return true;
  case library::NumericTypeID::kCF64:
    data_type = CUDA_C_64F;
    return true;
  case library::NumericTypeID::kInvalid:
  default:
    break;
  }
  return false;
}
/// Maps a cutlass::SideMode to cuBLAS side mode
/// Maps a cutlass::SideMode to cuBLAS side mode
bool get_cublas_side_mode(cublasSideMode_t& side, SideMode side_mode) {
  if (side_mode == SideMode::kLeft) {
    side = CUBLAS_SIDE_LEFT;
    return true;
  }
  if (side_mode == SideMode::kRight) {
    side = CUBLAS_SIDE_RIGHT;
    return true;
  }
  // kInvalid (or any future enumerant) has no cuBLAS equivalent.
  return false;
}
/// Maps a cutlass::FillMode to cuBLAS fill mode
/// Maps a cutlass::FillMode to cuBLAS fill mode
bool get_cublas_fill_mode(cublasFillMode_t& uplo, FillMode fill_mode) {
  if (fill_mode == FillMode::kLower) {
    uplo = CUBLAS_FILL_MODE_LOWER;
    return true;
  }
  if (fill_mode == FillMode::kUpper) {
    uplo = CUBLAS_FILL_MODE_UPPER;
    return true;
  }
  // Full / invalid fill modes cannot be expressed in cuBLAS.
  return false;
}
/// Maps a cutlass::DiagType to cuBLAS diag type
/// Maps a cutlass::DiagType to cuBLAS diag type
bool get_cublas_diag_type(cublasDiagType_t& diag, DiagType diag_type) {
  if (diag_type == DiagType::kNonUnit) {
    diag = CUBLAS_DIAG_NON_UNIT;
    return true;
  }
  if (diag_type == DiagType::kUnit) {
    diag = CUBLAS_DIAG_UNIT;
    return true;
  }
  // kZero / kInvalid have no cuBLAS equivalent.
  return false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class
/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class.
///
/// The tile dimensions are currently unused -- cuBLAS selects its own tiling for
/// the default algorithms -- but remain in the interface for future heuristics.
cublasGemmAlgo_t get_cublas_gemm_algo(int cta_m, int cta_n, int cta_k, library::OpcodeClassID opcode_class) {
  // Explicitly discard the unused tile dimensions to silence compiler warnings.
  (void)cta_m;
  (void)cta_n;
  (void)cta_k;
  return (opcode_class == library::OpcodeClassID::kSimt ?
    CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular GEMM description
/// Returns a status if cuBLAS can satisfy a particular GEMM description.
/// Filters out configurations cuBLAS does not implement so the profiler can
/// skip the reference check instead of reporting a spurious failure.
Status cublas_satisfies(library::GemmDescription const &desc) {
  auto const &math_instruction = desc.tile_description.math_instruction;
  // Integer tensor-op accumulation is not exposed by cuBLAS.
  if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
    math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
    return Status::kErrorNotSupported;
  }
  // output type S4 and S8 not supported in cuBLAS
  if (desc.C.element == library::NumericTypeID::kS4 ||
      desc.C.element == library::NumericTypeID::kS8) {
    return Status::kErrorNotSupported;
  }
  // input type BF16 and TF32 for A operand not supported in cuBLAS
  if (desc.A.element == library::NumericTypeID::kBF16 ||
      desc.A.element == library::NumericTypeID::kTF32) {
    return Status::kErrorNotSupported;
  }
  // input type BF16 and TF32 for B operand not supported in cuBLAS
  // (added for consistency with the Symm/Hemm satisfies() check below,
  // which screens both operands)
  if (desc.B.element == library::NumericTypeID::kBF16 ||
      desc.B.element == library::NumericTypeID::kTF32) {
    return Status::kErrorNotSupported;
  }
  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Constructs the dispatcher, translating CUTLASS operation metadata into the
/// cuBLAS enumerants required by cublasGemmEx. On any unmappable field the
/// dispatcher is marked Status::kErrorNotSupported rather than throwing.
cublasGemmExDispatcher::cublasGemmExDispatcher(
  library::GemmDescription const &op_desc,
  library::GemmUniversalConfiguration configuration_,
  library::GemmUniversalArguments arguments_,
  cublasGemmAlgo_t algorithm
):
  configuration(configuration_), arguments(arguments_), algo(algorithm), status(Status::kSuccess) {
  // Each mapping short-circuits on the first failure; 'good' accumulates success.
  bool good = true;
  good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
  good = (good && get_cublas_transpose_operation(trans_B, op_desc.B.layout, op_desc.transform_B));
  good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
  good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
  good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
  good = (good && get_cublas_datatype(
    compute_data_type,
    op_desc.tile_description.math_instruction.element_accumulator));
  // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
  // internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
  library::OpcodeClassID const & opcode_class =
    op_desc.tile_description.math_instruction.opcode_class;
  // F32 inputs on tensor cores map to the TF32 fast path so the comparison
  // matches the math the CUTLASS kernel performs.
  if (good &&
    op_desc.A.element == library::NumericTypeID::kF32 &&
    op_desc.B.element == library::NumericTypeID::kF32 &&
    opcode_class == library::OpcodeClassID::kTensorOp) {
    compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
  }
  else if (good) {
    // Otherwise derive the compute type from the accumulator data type.
    bool const isPedantic = false;
    switch (compute_data_type) {
      case CUDA_R_32F:
      case CUDA_C_32F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
        break;
      case CUDA_R_64F:
      case CUDA_C_64F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
        break;
      case CUDA_R_16F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
        break;
      case CUDA_R_32I:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
        break;
      default:
        good = false;
        break;
    }
  }
#endif // __CUDACC_VER_MAJOR__ >= 11
  if (!good) {
    status = Status::kErrorNotSupported;
  }
}
/// Executes GEMM using these arguments
/// Executes GEMM using these arguments.
/// Dispatches to the strided-batched entry point for batched mode, otherwise
/// to plain cublasGemmEx.
/// NOTE(review): arguments.D is passed as the C operand below -- this assumes
/// the profiler pre-populates D with the source accumulator so the epilogue
/// (beta * C) operates in place; confirm against the caller.
cublasStatus_t cublasGemmExDispatcher::operator()(cublasHandle_t handle) {
  if (configuration.mode == library::GemmUniversalMode::kBatched) {
    return cublasGemmStridedBatchedEx(
      handle,
      trans_A,
      trans_B,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      configuration.problem_size.k(),
      arguments.alpha,
      arguments.A,
      data_type_A,
      int(configuration.lda),
      arguments.batch_stride_A,
      arguments.B,
      data_type_B,
      int(configuration.ldb),
      arguments.batch_stride_B,
      arguments.beta,
      arguments.D,
      data_type_C,
      int(configuration.ldc),
      arguments.batch_stride_C,
      configuration.batch_count,
      // Pre-CUDA-11 cuBLAS takes a cudaDataType_t for the compute type.
#if (__CUDACC_VER_MAJOR__ >= 11)
      compute_type,
#else
      compute_data_type,
#endif
      algo
    );
  }
  else {
    return cublasGemmEx(
      handle,
      trans_A,
      trans_B,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      configuration.problem_size.k(),
      arguments.alpha,
      arguments.A,
      data_type_A,
      int(configuration.lda),
      arguments.B,
      data_type_B,
      int(configuration.ldb),
      arguments.beta,
      arguments.D,
      data_type_C,
      int(configuration.ldc),
#if (__CUDACC_VER_MAJOR__ >= 11)
      compute_type,
#else
      compute_data_type,
#endif
      algo
    );
  }
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular RankK description
/// Returns a status if cuBLAS can satisfy a particular RankK description
Status cublas_satisfies(library::RankKDescription const &desc) {
  auto const &instr = desc.tile_description.math_instruction;

  // Integer tensor-op accumulation is not exposed by cuBLAS rank-k kernels.
  bool const is_integer_tensorop =
    (instr.element_accumulator == library::NumericTypeID::kS32) &&
    (instr.opcode_class == library::OpcodeClassID::kTensorOp);
  if (is_integer_tensorop) {
    return Status::kErrorNotSupported;
  }

  // output type S4 and S8 not supported in cuBLAS
  switch (desc.C.element) {
  case library::NumericTypeID::kS4:
  case library::NumericTypeID::kS8:
    return Status::kErrorNotSupported;
  default:
    break;
  }

  // input type BF16 and TF32 not supported in cuBLAS
  switch (desc.A.element) {
  case library::NumericTypeID::kBF16:
  case library::NumericTypeID::kTF32:
    return Status::kErrorNotSupported;
  default:
    break;
  }

  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Constructs the rank-k dispatcher, translating CUTLASS metadata into cuBLAS
/// enumerants. num_ranks selects SYRK/HERK (1) vs SYR2K/HER2K (2) at call time;
/// blas_mode selects the Hermitian variants for complex types. Unmappable
/// fields mark the dispatcher Status::kErrorNotSupported.
cublasRankKDispatcher::cublasRankKDispatcher(
  library::RankKDescription const &op_desc,
  library::RankKConfiguration configuration_,
  library::RankKArguments arguments_
):
  configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
  blas_mode = op_desc.blas_mode;
  num_ranks = op_desc.num_ranks;
  // Each mapping short-circuits on the first failure; 'good' accumulates success.
  bool good = true;
  good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
  good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
  good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
  good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
  good = (good && get_cublas_datatype(
    compute_data_type,
    op_desc.tile_description.math_instruction.element_accumulator));
  // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
  // internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
  library::OpcodeClassID const & opcode_class =
    op_desc.tile_description.math_instruction.opcode_class;
  // F32 inputs on tensor cores map to the TF32 fast path.
  if (good &&
    op_desc.A.element == library::NumericTypeID::kF32 &&
    opcode_class == library::OpcodeClassID::kTensorOp) {
    compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
  }
  else if (good) {
    // Otherwise derive the compute type from the accumulator data type.
    bool const isPedantic = false;
    switch (compute_data_type) {
      case CUDA_R_32F:
      case CUDA_C_32F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
        break;
      case CUDA_R_64F:
      case CUDA_C_64F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
        break;
      case CUDA_R_16F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
        break;
      case CUDA_R_32I:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
        break;
      default:
        good = false;
        break;
    }
  }
#endif // __CUDACC_VER_MAJOR__ >= 11
  if (!good) {
    status = Status::kErrorNotSupported;
  }
}
/// Executes RankK using these arguments
/// Executes RankK using these arguments.
/// Dispatches on num_ranks (1 = SYRK/HERK, 2 = SYR2K/HER2K), then on the
/// precision of A/C, and finally on blas_mode for the Hermitian complex
/// variants. Unsupported combinations return CUBLAS_STATUS_NOT_SUPPORTED.
cublasStatus_t cublasRankKDispatcher::operator()(cublasHandle_t handle) {
  // SYRK and HERK
  if (num_ranks == 1) {
    // double-precision real: SYRK
    if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
      return cublasDsyrk(
        handle,
        uplo,
        trans_A,
        configuration.problem_size.n(),
        configuration.problem_size.k(),
        static_cast<const double*>(arguments.alpha),
        static_cast<const double*>(arguments.A),
        int(configuration.lda),
        static_cast<const double*>(arguments.beta),
        static_cast<double*>(arguments.D),
        int(configuration.ldc)
      );
    } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
      // single-precision real: enable the TF32 tensor-op path (CUDA 11+)
      // so results match CUTLASS tensor-op kernels.
#if (__CUDACC_VER_MAJOR__ >= 11)
      if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
        return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
      return cublasSsyrk(
        handle,
        uplo,
        trans_A,
        configuration.problem_size.n(),
        configuration.problem_size.k(),
        static_cast<const float*>(arguments.alpha),
        static_cast<const float*>(arguments.A),
        int(configuration.lda),
        static_cast<const float*>(arguments.beta),
        static_cast<float*>(arguments.D),
        int(configuration.ldc)
      );
    } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
      // double-precision complex: HERK takes real-valued alpha/beta.
      if (blas_mode == BlasMode::kHermitian) {
        return cublasZherk(
          handle,
          uplo,
          trans_A,
          configuration.problem_size.n(),
          configuration.problem_size.k(),
          static_cast<const double*>(arguments.alpha),
          static_cast<const cuDoubleComplex*>(arguments.A),
          int(configuration.lda),
          static_cast<const double*>(arguments.beta),
          static_cast<cuDoubleComplex*>(arguments.D),
          int(configuration.ldc)
        );
      }
      else {
        return cublasZsyrk(
          handle,
          uplo,
          trans_A,
          configuration.problem_size.n(),
          configuration.problem_size.k(),
          static_cast<const cuDoubleComplex*>(arguments.alpha),
          static_cast<const cuDoubleComplex*>(arguments.A),
          int(configuration.lda),
          static_cast<const cuDoubleComplex*>(arguments.beta),
          static_cast<cuDoubleComplex*>(arguments.D),
          int(configuration.ldc)
        );
      }
    } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
      // single-precision complex
#if (__CUDACC_VER_MAJOR__ >= 11)
      if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
        return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
      if (blas_mode == BlasMode::kHermitian) {
        return cublasCherk(
          handle,
          uplo,
          trans_A,
          configuration.problem_size.n(),
          configuration.problem_size.k(),
          static_cast<const float*>(arguments.alpha),
          static_cast<const cuComplex*>(arguments.A),
          int(configuration.lda),
          static_cast<const float*>(arguments.beta),
          static_cast<cuComplex*>(arguments.D),
          int(configuration.ldc)
        );
      }
      else {
        return cublasCsyrk(
          handle,
          uplo,
          trans_A,
          configuration.problem_size.n(),
          configuration.problem_size.k(),
          static_cast<const cuComplex*>(arguments.alpha),
          static_cast<const cuComplex*>(arguments.A),
          int(configuration.lda),
          static_cast<const cuComplex*>(arguments.beta),
          static_cast<cuComplex*>(arguments.D),
          int(configuration.ldc)
        );
      }
    } else {
      return CUBLAS_STATUS_NOT_SUPPORTED;
    }
  }
  // SYR2K and HER2K
  else if (num_ranks == 2) {
    // double-precision real: SYR2K
    if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
      return cublasDsyr2k(
        handle,
        uplo,
        trans_A,
        configuration.problem_size.n(),
        configuration.problem_size.k(),
        static_cast<const double*>(arguments.alpha),
        static_cast<const double*>(arguments.A),
        int(configuration.lda),
        static_cast<const double*>(arguments.B),
        int(configuration.ldb),
        static_cast<const double*>(arguments.beta),
        static_cast<double*>(arguments.D),
        int(configuration.ldc)
      );
    } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
      // single-precision real
#if (__CUDACC_VER_MAJOR__ >= 11)
      if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
        return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
      return cublasSsyr2k(
        handle,
        uplo,
        trans_A,
        configuration.problem_size.n(),
        configuration.problem_size.k(),
        static_cast<const float*>(arguments.alpha),
        static_cast<const float*>(arguments.A),
        int(configuration.lda),
        static_cast<const float*>(arguments.B),
        int(configuration.ldb),
        static_cast<const float*>(arguments.beta),
        static_cast<float*>(arguments.D),
        int(configuration.ldc)
      );
    } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
      // double-precision complex: HER2K takes complex alpha but real beta.
      if (blas_mode == BlasMode::kHermitian) {
        return cublasZher2k(
          handle,
          uplo,
          trans_A,
          configuration.problem_size.n(),
          configuration.problem_size.k(),
          static_cast<const cuDoubleComplex*>(arguments.alpha),
          static_cast<const cuDoubleComplex*>(arguments.A),
          int(configuration.lda),
          static_cast<const cuDoubleComplex*>(arguments.B),
          int(configuration.ldb),
          static_cast<const double*>(arguments.beta),
          static_cast<cuDoubleComplex*>(arguments.D),
          int(configuration.ldc)
        );
      }
      else {
        return cublasZsyr2k(
          handle,
          uplo,
          trans_A,
          configuration.problem_size.n(),
          configuration.problem_size.k(),
          static_cast<const cuDoubleComplex*>(arguments.alpha),
          static_cast<const cuDoubleComplex*>(arguments.A),
          int(configuration.lda),
          static_cast<const cuDoubleComplex*>(arguments.B),
          int(configuration.ldb),
          static_cast<const cuDoubleComplex*>(arguments.beta),
          static_cast<cuDoubleComplex*>(arguments.D),
          int(configuration.ldc)
        );
      }
    } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
      // single-precision complex
#if (__CUDACC_VER_MAJOR__ >= 11)
      if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
        return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
      if (blas_mode == BlasMode::kHermitian) {
        return cublasCher2k(
          handle,
          uplo,
          trans_A,
          configuration.problem_size.n(),
          configuration.problem_size.k(),
          static_cast<const cuComplex*>(arguments.alpha),
          static_cast<const cuComplex*>(arguments.A),
          int(configuration.lda),
          static_cast<const cuComplex*>(arguments.B),
          int(configuration.ldb),
          static_cast<const float*>(arguments.beta),
          static_cast<cuComplex*>(arguments.D),
          int(configuration.ldc)
        );
      }
      else {
        return cublasCsyr2k(
          handle,
          uplo,
          trans_A,
          configuration.problem_size.n(),
          configuration.problem_size.k(),
          static_cast<const cuComplex*>(arguments.alpha),
          static_cast<const cuComplex*>(arguments.A),
          int(configuration.lda),
          static_cast<const cuComplex*>(arguments.B),
          int(configuration.ldb),
          static_cast<const cuComplex*>(arguments.beta),
          static_cast<cuComplex*>(arguments.D),
          int(configuration.ldc)
        );
      }
    } else {
      return CUBLAS_STATUS_NOT_SUPPORTED;
    }
  }
  else {
    // Only rank-1 and rank-2 updates exist in BLAS.
    return CUBLAS_STATUS_NOT_SUPPORTED;
  }
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular TRMM description
/// Returns a status if cuBLAS can satisfy a particular TRMM description
Status cublas_satisfies(library::TrmmDescription const &desc) {
  auto const &instr = desc.tile_description.math_instruction;

  // Integer tensor-op accumulation is not exposed by cuBLAS TRMM.
  if ((instr.element_accumulator == library::NumericTypeID::kS32) &&
      (instr.opcode_class == library::OpcodeClassID::kTensorOp)) {
    return Status::kErrorNotSupported;
  }

  // output type S4 and S8 not supported in cuBLAS
  bool const output_unsupported =
    (desc.D.element == library::NumericTypeID::kS4) ||
    (desc.D.element == library::NumericTypeID::kS8);

  // input type BF16 and TF32 not supported in cuBLAS
  bool const input_unsupported =
    (desc.A.element == library::NumericTypeID::kBF16) ||
    (desc.A.element == library::NumericTypeID::kTF32);

  if (output_unsupported || input_unsupported) {
    return Status::kErrorNotSupported;
  }

  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Constructs the TRMM dispatcher, translating CUTLASS metadata into cuBLAS
/// enumerants (side, fill, diag, transpose, data types). Unmappable fields
/// mark the dispatcher Status::kErrorNotSupported.
cublasTrmmDispatcher::cublasTrmmDispatcher(
  library::TrmmDescription const &op_desc,
  library::TrmmConfiguration configuration_,
  library::TrmmArguments arguments_
):
  configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
  // Each mapping short-circuits on the first failure; 'good' accumulates success.
  bool good = true;
  good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
  good = (good && get_cublas_side_mode(side, op_desc.side_mode));
  good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
  good = (good && get_cublas_diag_type(diag, op_desc.diag_type));
  good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
  good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
  good = (good && get_cublas_datatype(data_type_D, op_desc.D.element))
  // if A is Transposed, then for cuBLAS that is inverted Fill Mode.
  if (trans_A == CUBLAS_OP_T || trans_A == CUBLAS_OP_C) {
    if (uplo == CUBLAS_FILL_MODE_LOWER)
      uplo = CUBLAS_FILL_MODE_UPPER;
    else
      uplo = CUBLAS_FILL_MODE_LOWER;
  }
  good = (good && get_cublas_datatype(
    compute_data_type,
    op_desc.tile_description.math_instruction.element_accumulator));
  // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
  // internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
  library::OpcodeClassID const & opcode_class =
    op_desc.tile_description.math_instruction.opcode_class;
  // F32 inputs on tensor cores map to the TF32 fast path.
  if (good &&
    op_desc.A.element == library::NumericTypeID::kF32 &&
    opcode_class == library::OpcodeClassID::kTensorOp) {
    compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
  }
  else if (good) {
    // Otherwise derive the compute type from the accumulator data type.
    bool const isPedantic = false;
    switch (compute_data_type) {
      case CUDA_R_32F:
      case CUDA_C_32F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
        break;
      case CUDA_R_64F:
      case CUDA_C_64F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
        break;
      case CUDA_R_16F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
        break;
      case CUDA_R_32I:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
        break;
      default:
        good = false;
        break;
    }
  }
#endif // __CUDACC_VER_MAJOR__ >= 11
  if (!good) {
    status = Status::kErrorNotSupported;
  }
}
/// Executes TRMM using these arguments
/// Executes TRMM using these arguments.
/// Dispatches on the precision of A/D to the typed cuBLAS TRMM entry points;
/// uses the out-of-place (B in, D out) TRMM signature.
cublasStatus_t cublasTrmmDispatcher::operator()(cublasHandle_t handle) {
  // double-precision real
  if (data_type_A == data_type_D && data_type_A == CUDA_R_64F) {
    return cublasDtrmm(
      handle,
      side,
      uplo,
      trans_A,
      diag,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      static_cast<const double*>(arguments.alpha),
      static_cast<const double*>(arguments.A),
      int(configuration.lda),
      static_cast<const double*>(arguments.B),
      int(configuration.ldb),
      static_cast<double*>(arguments.D),
      int(configuration.ldd)
    );
  } else if (data_type_A == data_type_D && data_type_A == CUDA_R_32F) {
    // single-precision real: enable the TF32 tensor-op path (CUDA 11+)
    // so results match CUTLASS tensor-op kernels.
#if (__CUDACC_VER_MAJOR__ >= 11)
    if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
      return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
    return cublasStrmm(
      handle,
      side,
      uplo,
      trans_A,
      diag,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      static_cast<const float*>(arguments.alpha),
      static_cast<const float*>(arguments.A),
      int(configuration.lda),
      static_cast<const float*>(arguments.B),
      int(configuration.ldb),
      static_cast<float*>(arguments.D),
      int(configuration.ldd)
    );
  } else if (data_type_A == data_type_D && data_type_A == CUDA_C_64F) {
    // double-precision complex
    return cublasZtrmm(
      handle,
      side,
      uplo,
      trans_A,
      diag,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      static_cast<const cuDoubleComplex*>(arguments.alpha),
      static_cast<const cuDoubleComplex*>(arguments.A),
      int(configuration.lda),
      static_cast<const cuDoubleComplex*>(arguments.B),
      int(configuration.ldb),
      static_cast<cuDoubleComplex*>(arguments.D),
      int(configuration.ldd)
    );
  } else if (data_type_A == data_type_D && data_type_A == CUDA_C_32F) {
    // single-precision complex
#if (__CUDACC_VER_MAJOR__ >= 11)
    if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
      return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
    return cublasCtrmm(
      handle,
      side,
      uplo,
      trans_A,
      diag,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      static_cast<const cuComplex*>(arguments.alpha),
      static_cast<const cuComplex*>(arguments.A),
      int(configuration.lda),
      static_cast<const cuComplex*>(arguments.B),
      int(configuration.ldb),
      static_cast<cuComplex*>(arguments.D),
      int(configuration.ldd)
    );
  } else {
    // No typed TRMM exists for this precision combination.
    return CUBLAS_STATUS_NOT_SUPPORTED;
  }
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular Symm description
/// Returns a status if cuBLAS can satisfy a particular Symm description.
/// Filters out configurations cuBLAS SYMM/HEMM does not implement.
Status cublas_satisfies(library::SymmDescription const &desc) {
  auto const &math_instruction = desc.tile_description.math_instruction;
  // Integer tensor-op accumulation is not exposed by cuBLAS.
  if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
    math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
    return Status::kErrorNotSupported;
  }
  // output type S4 and S8 not supported in cuBLAS
  if (desc.C.element == library::NumericTypeID::kS4 ||
      desc.C.element == library::NumericTypeID::kS8) {
    return Status::kErrorNotSupported;
  }
  // input type BF16 and TF32 for A operand not supported in cuBLAS
  if (desc.A.element == library::NumericTypeID::kBF16 ||
      desc.A.element == library::NumericTypeID::kTF32) {
    return Status::kErrorNotSupported;
  }
  // input type BF16 and TF32 for B operand not supported in cuBLAS
  if (desc.B.element == library::NumericTypeID::kBF16 ||
      desc.B.element == library::NumericTypeID::kTF32) {
    return Status::kErrorNotSupported;
  }
  // only column major layout is supported in cuBLAS
  if (desc.A.layout != library::LayoutTypeID::kColumnMajor ||
      desc.transform_A != library::ComplexTransform::kNone) {
    return Status::kErrorNotSupported;
  }
  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Constructs the SYMM/HEMM dispatcher, translating CUTLASS metadata into
/// cuBLAS enumerants. blas_mode selects the Hermitian variants for complex
/// types at call time. Unmappable fields mark the dispatcher
/// Status::kErrorNotSupported.
cublasSymmDispatcher::cublasSymmDispatcher(
  library::SymmDescription const &op_desc,
  library::SymmConfiguration configuration_,
  library::SymmArguments arguments_
):
  configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
  blas_mode = op_desc.blas_mode;
  // Each mapping short-circuits on the first failure; 'good' accumulates success.
  bool good = true;
  good = (good && get_cublas_side_mode(side, op_desc.side_mode));
  good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
  good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
  good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
  good = (good && get_cublas_datatype(
    compute_data_type,
    op_desc.tile_description.math_instruction.element_accumulator));
  // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
  // internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
  library::OpcodeClassID const & opcode_class =
    op_desc.tile_description.math_instruction.opcode_class;
  // F32 inputs on tensor cores map to the TF32 fast path.
  if (good &&
    op_desc.A.element == library::NumericTypeID::kF32 &&
    opcode_class == library::OpcodeClassID::kTensorOp) {
    compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
  }
  else if (good) {
    // Otherwise derive the compute type from the accumulator data type.
    bool const isPedantic = false;
    switch (compute_data_type) {
      case CUDA_R_32F:
      case CUDA_C_32F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
        break;
      case CUDA_R_64F:
      case CUDA_C_64F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
        break;
      case CUDA_R_16F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
        break;
      case CUDA_R_32I:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
        break;
      default:
        good = false;
        break;
    }
  }
#endif // __CUDACC_VER_MAJOR__ >= 11
  if (!good) {
    status = Status::kErrorNotSupported;
  }
}
/// Executes Symm using these arguments
/// Executes Symm using these arguments.
/// Dispatches on the precision of A/C, then on blas_mode for the Hermitian
/// complex variants (HEMM). Unsupported precision combinations return
/// CUBLAS_STATUS_NOT_SUPPORTED.
cublasStatus_t cublasSymmDispatcher::operator()(cublasHandle_t handle) {
  // SYMM and HEMM
  // double-precision real: SYMM
  if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
    return cublasDsymm(
      handle,
      side,
      uplo,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      static_cast<const double*>(arguments.alpha),
      static_cast<const double*>(arguments.A),
      int(configuration.lda),
      static_cast<const double*>(arguments.B),
      int(configuration.ldb),
      static_cast<const double*>(arguments.beta),
      static_cast<double*>(arguments.D),
      int(configuration.ldc)
    );
  } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
    // single-precision real: enable the TF32 tensor-op path (CUDA 11+)
    // so results match CUTLASS tensor-op kernels.
#if (__CUDACC_VER_MAJOR__ >= 11)
    if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
      return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
    return cublasSsymm(
      handle,
      side,
      uplo,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      static_cast<const float*>(arguments.alpha),
      static_cast<const float*>(arguments.A),
      int(configuration.lda),
      static_cast<const float*>(arguments.B),
      int(configuration.ldb),
      static_cast<const float*>(arguments.beta),
      static_cast<float*>(arguments.D),
      int(configuration.ldc)
    );
  } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
    // double-precision complex: HEMM for Hermitian mode, otherwise SYMM.
    if (blas_mode == BlasMode::kHermitian) {
      return cublasZhemm(
        handle,
        side,
        uplo,
        configuration.problem_size.m(),
        configuration.problem_size.n(),
        static_cast<const cuDoubleComplex*>(arguments.alpha),
        static_cast<const cuDoubleComplex*>(arguments.A),
        int(configuration.lda),
        static_cast<const cuDoubleComplex*>(arguments.B),
        int(configuration.ldb),
        static_cast<const cuDoubleComplex*>(arguments.beta),
        static_cast<cuDoubleComplex*>(arguments.D),
        int(configuration.ldc)
      );
    }
    else {
      return cublasZsymm(
        handle,
        side,
        uplo,
        configuration.problem_size.m(),
        configuration.problem_size.n(),
        static_cast<const cuDoubleComplex*>(arguments.alpha),
        static_cast<const cuDoubleComplex*>(arguments.A),
        int(configuration.lda),
        static_cast<const cuDoubleComplex*>(arguments.B),
        int(configuration.ldb),
        static_cast<const cuDoubleComplex*>(arguments.beta),
        static_cast<cuDoubleComplex*>(arguments.D),
        int(configuration.ldc)
      );
    }
  } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
    // single-precision complex
#if (__CUDACC_VER_MAJOR__ >= 11)
    if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
      return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
    if (blas_mode == BlasMode::kHermitian) {
      return cublasChemm(
        handle,
        side,
        uplo,
        configuration.problem_size.m(),
        configuration.problem_size.n(),
        static_cast<const cuComplex*>(arguments.alpha),
        static_cast<const cuComplex*>(arguments.A),
        int(configuration.lda),
        static_cast<const cuComplex*>(arguments.B),
        int(configuration.ldb),
        static_cast<const cuComplex*>(arguments.beta),
        static_cast<cuComplex*>(arguments.D),
        int(configuration.ldc)
      );
    }
    else {
      return cublasCsymm(
        handle,
        side,
        uplo,
        configuration.problem_size.m(),
        configuration.problem_size.n(),
        static_cast<const cuComplex*>(arguments.alpha),
        static_cast<const cuComplex*>(arguments.A),
        int(configuration.lda),
        static_cast<const cuComplex*>(arguments.B),
        int(configuration.ldb),
        static_cast<const cuComplex*>(arguments.beta),
        static_cast<cuComplex*>(arguments.D),
        int(configuration.ldc)
      );
    }
  } else {
    // No typed SYMM/HEMM exists for this precision combination.
    return CUBLAS_STATUS_NOT_SUPPORTED;
  }
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
#endif // #if CUTLASS_ENABLE_CUBLAS
| cutlass/tools/profiler/src/cublas_helpers.cu/0 | {
"file_path": "cutlass/tools/profiler/src/cublas_helpers.cu",
"repo_id": "cutlass",
"token_count": 15733
} | 68 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/sparse_gemm_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
/// Constructs the sparse-GEMM profiler and registers the command-line arguments
/// it understands with the base OperationProfiler (problem dimensions, operand
/// tensors including the sparse metadata tensor E, epilogue scalars, and
/// split-K / batching controls).
SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const &options):
  OperationProfiler(
    options,
    library::OperationKind::kSparseGemm,
    {
      {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. sparse, ...)"},
      {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"},
      {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"},
      {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"},
      {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
      {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
      {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
      // E holds the structured-sparsity metadata describing which elements of A are kept.
      {ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"},
      {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
      {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
      {ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"},
      {ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"},
    }
  ) {
  // Human-readable description shown by the profiler's operation listing.
  description_ = " Structured sparse GEMM. D = alpha * A*B + beta * C";
}
/// Destructor
/// Destructor — no resources beyond what the base class and members manage.
SparseGemmOperationProfiler::~SparseGemmOperationProfiler() = default;
/// Prints usage statement for the math function
/// Writes the usage banner for the sparse GEMM operation, then delegates to the
/// base class to print the generic argument documentation.
void SparseGemmOperationProfiler::print_usage(std::ostream &out) const {
  // Title line followed by a blank separator line (byte-identical output).
  out << "Sparse GEMM";
  out << "\n\n";
  // Base class prints the shared argument descriptions.
  OperationProfiler::print_usage(out);
}
/// Prints examples
/// Prints worked command-line examples for profiling sparse GEMM kernels:
/// single problem sizes, schmoo sweeps, layout/datatype filters, input
/// distributions, tile-size selection, and a functional-test recipe.
void SparseGemmOperationProfiler::print_examples(std::ostream &out) const {
  // Each example below is a literal invocation of cutlass_profiler; the text is
  // emitted verbatim, so keep the flags in sync with the arguments registered
  // in the constructor.
  out << "\nExamples:\n\n"
    << "Profile a particular problem size:\n"
    << " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 --k=128\n\n"
    << "Schmoo over problem size and beta:\n"
    << " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
    << "Schmoo over accumulator types:\n"
    << " $ cutlass_profiler --operation=SparseGemm --accumulator-type=f16,f32\n\n"
    << "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
    << " $ cutlass_profiler --operation=SparseGemm --A=f16:column --B=*:row\n\n"
    << "Using various input value distribution:\n"
    << " $ cutlass_profiler --operation=SparseGemm --dist=uniform,min:0,max:3\n"
    << " $ cutlass_profiler --operation=SparseGemm --dist=gaussian,mean:0,stddev:3\n"
    << " $ cutlass_profiler --operation=SparseGemm --dist=sequential,start:0,delta:1\n\n"
    << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
    << " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
    << "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n"
    << " $ cutlass_profiler --operation=SparseGemm \\ \n"
    << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
    << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
    << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
    << " --beta=0,1,2 --profiling-iterations=1 \\ \n"
    << " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the sparse-GEMM problem from the problem space: integer dimensions
/// (with defaults), operand tensor descriptions, epilogue scalars, and the
/// derived leading dimensions for A, B, C, and the metadata tensor E.
///
/// Returns kErrorInvalidProblem if any operand description is unsatisfiable,
/// kErrorInternal if a scalar default cannot be cast, kSuccess otherwise.
Status SparseGemmOperationProfiler::SparseGemmProblem::parse(
  library::SparseGemmDescription const &operation_desc,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  if (!arg_as_int(this->m, "m", problem_space, problem)) {
    // default value
    this->m = 1024;
  }

  if (!arg_as_int(this->n, "n", problem_space, problem)) {
    // default value
    this->n = 1024;
  }

  if (!arg_as_int(this->k, "k", problem_space, problem)) {
    // default value
    this->k = 1024;
  }

  if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
    // default value
    this->split_k_slices = 1;
  }

  if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
    // default value
    this->batch_count = 1;
  }

  // Validate that each operand's requested element type / layout can be
  // satisfied by this kernel's description.
  if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  // Epilogue scalars: fall back to alpha=1, beta=0 when not specified.
  if (!arg_as_scalar(
    this->alpha,
    operation_desc.element_epilogue,
    "alpha",
    problem_space,
    problem)) {

    if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
      return Status::kErrorInternal;
    }
  }

  if (!arg_as_scalar(
    this->beta,
    operation_desc.element_epilogue,
    "beta",
    problem_space,
    problem)) {

    if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
      return Status::kErrorInternal;
    }
  }

  // Number of A elements packed into one 128-bit chunk of metadata addressing.
  this->elements_per_128b =
      128 / library::sizeof_bits(operation_desc.A.element);

  // A is stored compressed: its K extent is divided by the structured-sparsity
  // ratio `sparse` (presumably 2 for 2:4 sparsity — confirm against the
  // SparseGemmProblem declaration).
  this->lda = DeviceAllocation::get_packed_layout(
                  operation_desc.A.layout,
                  {int(this->m), int(this->k) / int(this->sparse)})
                  .front();

  this->ldb = DeviceAllocation::get_packed_layout(
    operation_desc.B.layout, {int(this->k), int(this->n)}).front();

  this->ldc = DeviceAllocation::get_packed_layout(
    operation_desc.C.layout, {int(this->m), int(this->n)}).front();

  // E holds metadata for the compressed A; its column count is further reduced
  // by the number of elements described per 128 bits.
  this->lde =
      DeviceAllocation::get_packed_layout(
          operation_desc.E.layout,
          {int(this->m), int(this->k / this->sparse / this->elements_per_128b)})
          .front();

  return Status::kSuccess;
}
/// Initializes a performance result
/// Populates the per-problem arguments of a performance result: gemm kind,
/// operand element:layout strings, dimensions, split-K/batch counts, and the
/// epilogue scalars rendered in the kernel's epilogue element type.
void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result(
  PerformanceResult &result,
  library::SparseGemmDescription const &operation_desc,
  ProblemSpace const &problem_space) {

  // One slot per argument defined in the problem space.
  result.arguments.resize(problem_space.rank());

  set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind));

  // Operands are reported as "<element>:<layout>" (e.g. "f16:column").
  set_argument(result, "A", problem_space,
    std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));

  set_argument(result, "B", problem_space,
    std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));

  set_argument(result, "C", problem_space,
    std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));

  set_argument(result, "E", problem_space,
    std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout));

  set_argument(result, "m", problem_space, m);
  set_argument(result, "n", problem_space, n);
  set_argument(result, "k", problem_space, k);

  set_argument(result, "split_k_slices", problem_space, split_k_slices);
  set_argument(result, "batch_count", problem_space, batch_count);

  // Scalars are serialized in the epilogue's element type, not as doubles.
  set_argument(result, "alpha", problem_space,
    library::lexical_cast(alpha, operation_desc.element_epilogue));

  set_argument(result, "beta", problem_space,
    library::lexical_cast(beta, operation_desc.element_epilogue));
}
/// Extracts the problem dimensions
/// Parses the problem, fills the GEMM configuration (problem size and leading
/// dimensions) and a host-scalar argument skeleton with null data pointers,
/// then asks the operation whether it can implement this configuration.
Status SparseGemmOperationProfiler::initialize_configuration(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  library::SparseGemmDescription const &operation_desc =
    static_cast<library::SparseGemmDescription const &>(operation->description());

  // This profiler only handles the structured-sparse GEMM variant.
  if (operation_desc.gemm_kind != library::GemmKind::kSparse) {
    return Status::kErrorInvalidProblem;
  }

  Status status = problem_.parse(operation_desc, problem_space, problem);

  if (status != Status::kSuccess) {
    return status;
  }

  // Problem extents and leading dimensions computed by parse().
  gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
  gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
  gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
  gemm_workspace_.configuration.lda = problem_.lda;
  gemm_workspace_.configuration.ldb = problem_.ldb;
  gemm_workspace_.configuration.ldc = problem_.ldc;
  gemm_workspace_.configuration.ldd = problem_.ldc;  // D shares C's layout
  gemm_workspace_.configuration.lde = problem_.lde;

  // Tensor pointers are bound later (in initialize_workspace / verify / profile);
  // only the scalar pointers are valid at this stage.
  gemm_workspace_.arguments.A = nullptr;
  gemm_workspace_.arguments.B = nullptr;
  gemm_workspace_.arguments.C = nullptr;
  gemm_workspace_.arguments.D = nullptr;
  gemm_workspace_.arguments.E = nullptr;
  gemm_workspace_.arguments.alpha = problem_.alpha.data();
  gemm_workspace_.arguments.beta = problem_.beta.data();
  gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;

  initialize_result_(this->model_result_, options, operation_desc, problem_space);

  return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments);
}
/// Initializes the performance result
/// Initializes a performance result for this operation: provider/disposition
/// defaults, the problem's argument strings, and the modeled byte and FLOP
/// counts used to report achieved bandwidth and throughput.
void SparseGemmOperationProfiler::initialize_result_(
  PerformanceResult &result,
  Options const &options,
  library::SparseGemmDescription const &operation_desc,
  ProblemSpace const &problem_space) {

  result.provider = library::Provider::kCUTLASS;
  result.disposition = Disposition::kNotRun;
  result.status = Status::kSuccess;
  result.operation_name = operation_desc.name;

  problem_.initialize_result(result, operation_desc, problem_space);

  OperationProfiler::initialize_result_(result, operation_desc, problem_space);

  // Input bytes read and Output bytes written for the gemm problem:
  // A is compressed by the sparsity ratio, E is the metadata for compressed A,
  // and C here accounts for the bytes written to the output.
  result.bytes =
      int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) *
          problem_.k / problem_.sparse +
      int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) *
          problem_.k +
      int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) *
          problem_.n +
      int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) *
          problem_.k / problem_.sparse / problem_.elements_per_128b;

  // Set is_beta_zero true if beta is zero
  bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i==0; });

  // Output bytes read for the gemm problem for non-zero beta values
  if (!is_beta_zero) {
    result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n;
  }

  // 2 * m * n * k for the multiply-accumulates plus 2 * m * n for the epilogue.
  result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n);
  result.runtime = 0;
}
/// Initializes workspace
Status SparseGemmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SparseGemmDescription const &operation_desc =
static_cast<library::SparseGemmDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
gemm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.k) / int(problem_.sparse)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
gemm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.k), int(problem_.n)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
gemm_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
gemm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
gemm_workspace_.E = device_context.allocate_sparsemeta_tensor(
options,
"E",
operation_desc.E.element,
operation_desc.E.layout,
operation_desc.A.element,
{int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)},
{int(problem_.lde)},
1, // batch_count
seed_shift++
);
gemm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&gemm_workspace_.configuration);
gemm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&gemm_workspace_.configuration);
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&gemm_workspace_.configuration,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kSparseGemm;
results_.back().disposition = Disposition::kNotRun;
for(auto &verification_provider : options.verification.providers) {
results_.back().verification_map[verification_provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
/// Runs the CUTLASS operation once and records its disposition, then folds in
/// the outcomes of any enabled verification providers. Returns true to
/// continue profiling (even when verification fails), false only when the
/// CUTLASS kernel itself fails to launch or synchronize.
bool SparseGemmOperationProfiler::verify_cutlass(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
    return true;
  }

  if (options.execution_mode == ExecutionMode::kDryRun) {
    return true;
  }

  // Initialize structure containing GEMM arguments
  gemm_workspace_.arguments.A = gemm_workspace_.A->data();
  gemm_workspace_.arguments.B = gemm_workspace_.B->data();
  gemm_workspace_.arguments.C = gemm_workspace_.C->data();
  gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
  gemm_workspace_.arguments.E = gemm_workspace_.E->data();
  gemm_workspace_.arguments.alpha = problem_.alpha.data();
  gemm_workspace_.arguments.beta = problem_.beta.data();
  gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;

  //
  // Run the CUTLASS operation
  //

  results_.back().status = operation->run(
    &gemm_workspace_.arguments,
    gemm_workspace_.host_workspace.data(),
    gemm_workspace_.device_workspace.data());

  if (results_.back().status != Status::kSuccess) {
    results_.back().disposition = Disposition::kFailed;
    return false;
  }

  // Synchronize to surface any asynchronous kernel launch errors.
  cudaError_t result = cudaDeviceSynchronize();

  if (result != cudaSuccess) {
    results_.back().disposition = Disposition::kFailed;
    return false;
  }

  // CUTLASS op ran but has not yet been verified against any verification provider
  results_.back().disposition = Disposition::kNotVerified;

  //
  // Run verification providers
  //

  if (options.verification.enabled) {
    // Update disposition to worst case verification outcome among all
    // verification providers which are supported
    bool is_any_verification_run_passed = false;

    for(auto &m : results_.back().verification_map) {
      // Any failure or mismatch is terminal for this result.
      if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
        results_.back().disposition = m.second;
        return true;
      }
      if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
        is_any_verification_run_passed = true;
      }
    }

    if(is_any_verification_run_passed) {
      results_.back().disposition = Disposition::kPassed;
    }
  }

  // Return true means continue profiling
  return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
/// Measures performance of the CUTLASS operation: rebinds the tensor/scalar
/// argument pointers and delegates timing to OperationProfiler::profile_cutlass_,
/// storing the measured runtime in the current result entry.
bool SparseGemmOperationProfiler::profile(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {

    // Initialize structure containing GEMM arguments
    gemm_workspace_.arguments.A = gemm_workspace_.A->data();
    gemm_workspace_.arguments.B = gemm_workspace_.B->data();
    gemm_workspace_.arguments.C = gemm_workspace_.C->data();
    gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
    gemm_workspace_.arguments.E = gemm_workspace_.E->data();
    gemm_workspace_.arguments.alpha = problem_.alpha.data();
    gemm_workspace_.arguments.beta = problem_.beta.data();
    gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;

    results_.back().status = profile_cutlass_(
      results_.back().runtime,
      options,
      operation,
      &gemm_workspace_.arguments,
      gemm_workspace_.host_workspace.data(),
      gemm_workspace_.device_workspace.data()
    );
  }

  // Always continue to subsequent problems.
  return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/tools/profiler/src/sparse_gemm_operation_profiler.cu/0 | {
"file_path": "cutlass/tools/profiler/src/sparse_gemm_operation_profiler.cu",
"repo_id": "cutlass",
"token_count": 7352
} | 69 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/device_utils.h"
#include <float.h>
namespace cutlass {
// Vectorized two-pass RMSNorm kernel: one thread block per row of an m x n
// matrix of fp16 values, reinterpreted as float4 (8 halves per load).
// Pass 1 accumulates the sum of squares; pass 2 scales each element by
// rsqrt(mean-square + epsilon) times the per-column weight.
// Caller must guarantee n % 8 == 0 and that the underlying data is half
// precision — presumably enforced by the host-side rmsnorm() dispatch.
__global__ void rmsnorm_twoPassAlgo_e8(float4 *output, const float4 *input,
                                       const float4 *weight,
                                       const int m, const int n, float epsilon) {
  const int m_idx = blockIdx.x;   // row handled by this block
  const int tid = threadIdx.x;
  const int bdimx = blockDim.x;
  __shared__ float s_mean;        // broadcast rsqrt scale for the whole row
  float local_sums[1] = {0.0f};
  const int n_8 = n / 8;          // row width in float4 (8-half) units
  int offset = m_idx * n_8;
  input += offset;
  output += offset;

  // Pass 1: per-thread partial sum of squares over strided float4 loads.
  for (int index = tid; index < n_8; index += bdimx) {
    const float4 local_val = input[index];
    // Reinterpret each 32-bit lane as a pair of halves.
    const half2 *h1 = (half2 *)&local_val.x;
    const half2 *h2 = (half2 *)&local_val.y;
    const half2 *h3 = (half2 *)&local_val.z;
    const half2 *h4 = (half2 *)&local_val.w;
    local_sums[0] += static_cast<float>(h1->x) * static_cast<float>(h1->x) +
                     static_cast<float>(h1->y) * static_cast<float>(h1->y) +
                     static_cast<float>(h2->x) * static_cast<float>(h2->x) +
                     static_cast<float>(h2->y) * static_cast<float>(h2->y) +
                     static_cast<float>(h3->x) * static_cast<float>(h3->x) +
                     static_cast<float>(h3->y) * static_cast<float>(h3->y) +
                     static_cast<float>(h4->x) * static_cast<float>(h4->x) +
                     static_cast<float>(h4->y) * static_cast<float>(h4->y);
  }

  // Reduce partial sums across the block (warp-only fast path when <= 32 threads).
  if (blockDim.x <= 32) {
    warpReduceSum<float, 1>(local_sums);
  } else {
    blockReduceSum<float, 1>(local_sums);
  }
  if (threadIdx.x == 0) {
    s_mean = rsqrtf(local_sums[0] / n + epsilon);
  }
  __syncthreads();

  // Pass 2: normalize and apply the elementwise weight, repacking into float4.
  for (int index = tid; index < n_8; index += bdimx) {
    const float4 local_val = input[index];
    const float4 weight_val = weight[index];

    const half2 *l1 = (half2 *)&local_val.x;
    const half2 *l2 = (half2 *)&local_val.y;
    const half2 *l3 = (half2 *)&local_val.z;
    const half2 *l4 = (half2 *)&local_val.w;

    const half2 *g1 = (half2 *)&weight_val.x;
    const half2 *g2 = (half2 *)&weight_val.y;
    const half2 *g3 = (half2 *)&weight_val.z;
    const half2 *g4 = (half2 *)&weight_val.w;

    float4 tmp;
    half2 *h1 = (half2 *)&tmp.x;
    half2 *h2 = (half2 *)&tmp.y;
    half2 *h3 = (half2 *)&tmp.z;
    half2 *h4 = (half2 *)&tmp.w;

    h1->x = half(static_cast<float>(l1->x) * s_mean * static_cast<float>(g1->x));
    h1->y = half(static_cast<float>(l1->y) * s_mean * static_cast<float>(g1->y));
    h2->x = half(static_cast<float>(l2->x) * s_mean * static_cast<float>(g2->x));
    h2->y = half(static_cast<float>(l2->y) * s_mean * static_cast<float>(g2->y));
    h3->x = half(static_cast<float>(l3->x) * s_mean * static_cast<float>(g3->x));
    h3->y = half(static_cast<float>(l3->y) * s_mean * static_cast<float>(g3->y));
    h4->x = half(static_cast<float>(l4->x) * s_mean * static_cast<float>(g4->x));
    h4->y = half(static_cast<float>(l4->y) * s_mean * static_cast<float>(g4->y));

    output[index] = tmp;
  }
}
// Scalar two-pass RMSNorm kernel: one thread block per row; each thread
// strides over single elements. Pass 1 computes the sum of squares in float,
// pass 2 writes output = input * rsqrt(mean-square + epsilon) * weight.
// Fallback path for element types / widths the vectorized e8 kernel cannot handle.
template<typename T>
__global__ void rmsnorm_twoPassAlgo_e1(T* output,
                                       const T* input,
                                       const T* weight,
                                       const int m, const int n,
                                       float epsilon)
{
  const int m_idx = blockIdx.x;   // row handled by this block
  const int tid = threadIdx.x;
  const int bdimx = blockDim.x;
  __shared__ float s_mean;        // broadcast rsqrt scale for the whole row
  float local_sums[1] = {0.0f};
  int offset = m_idx * n;
  input += offset;
  output += offset;

  // Pass 1: per-thread partial sum of squares, accumulated in float.
  for (int index = tid ; index < n ; index += bdimx){
    float local_val = static_cast<float>(input[index]);
    local_sums[0] += local_val * local_val;
  }

  // Reduce partial sums across the block (warp-only fast path when <= 32 threads).
  if (blockDim.x <= 32) {
    warpReduceSum<float, 1>(local_sums);
  }
  else {
    blockReduceSum<float, 1>(local_sums);
  }
  if (threadIdx.x == 0) {
    s_mean = rsqrtf(local_sums[0] / n + epsilon);
  }
  __syncthreads();

  // Pass 2: normalize and apply the elementwise weight.
  for (int index = tid ; index < n ; index += bdimx){
    const T weight_val = weight[index];
    const T local_val = input[index];
    output[index] = T(static_cast<float>(local_val) * s_mean * static_cast<float>(weight_val));
  }
}
/// Host-side RMSNorm dispatcher: output[i,:] = input[i,:] * rsqrt(mean(input[i,:]^2) + epsilon) * weight[:].
/// Launches one thread block per row, choosing the float4-vectorized kernel when
/// the row width is a multiple of 8 and the element type is half precision,
/// otherwise the scalar fallback. Aborts on a launch error.
template <typename T>
void rmsnorm(cutlass::MatrixCoord tensor_size,
             TensorRef<T, layout::RowMajor> ref_output,
             TensorRef<T, layout::RowMajor> ref_input,
             TensorRef<T, layout::RowMajor> ref_weight,
             cudaStream_t stream, float epsilon = 1e-5f){
  const int rows = tensor_size.row();
  const int cols = tensor_size.column();

  T *out_ptr = ref_output.data();
  const T *in_ptr = ref_input.data();
  const T *weight_ptr = ref_weight.data();

  // One block per matrix row.
  dim3 grid(rows);

  const bool vectorizable =
      (cols % 8 == 0) && std::is_same<T, cutlass::half_t>::value;

  if (vectorizable) {
    // Each thread consumes 8 halves (one float4); round thread count up to a
    // whole warp, capped at the 1024-thread block limit.
    dim3 block(min(1024, (cols / 8 + 31) / 32 * 32));
    rmsnorm_twoPassAlgo_e8<<<grid, block, 0, stream>>>(
        (float4 *)out_ptr, (const float4 *)in_ptr, (const float4 *)weight_ptr,
        rows, cols, epsilon);
  }
  else {
    dim3 block(min(1024, ((cols + 31)/32 + 31)/32*32));
    rmsnorm_twoPassAlgo_e1<<<grid, block, 0, stream>>>(
        out_ptr, in_ptr, weight_ptr, rows, cols, epsilon);
  }

  // Surface launch-configuration errors immediately.
  auto result = cudaGetLastError();
  if (result != cudaSuccess) {
    std::cerr << "CUDA error: " << cudaGetErrorString(result) << std::endl;
    abort();
  }
}
} // namespace cutlass
| cutlass/tools/util/include/cutlass/util/device_rmsnorm.h/0 | {
"file_path": "cutlass/tools/util/include/cutlass/util/device_rmsnorm.h",
"repo_id": "cutlass",
"token_count": 3049
} | 70 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GEMM in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/reference/device/kernel/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ScalarType,
  typename AccumulatorType,
  typename InnerProductOp = multiply_add<AccumulatorType>,
  typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
  gemm::GemmCoord problem_size,        // GEMM extents (m, n, k)
  ScalarType alpha,                    // scale applied to A*B
  TensorRef<ElementA, LayoutA> tensor_a,
  TensorRef<ElementB, LayoutB> tensor_b,
  ScalarType beta,                     // scale applied to C
  TensorRef<ElementC, LayoutC> tensor_c,   // source operand C
  TensorRef<ElementC, LayoutC> tensor_d,   // destination D (may alias C)
  AccumulatorType initial_accum) {     // starting accumulator value (typically 0)

  static_assert(
    LayoutA::kRank == 2 &&
    LayoutB::kRank == 2 &&
    LayoutC::kRank == 2, "Tensors must be of rank 2");

  // Blocking structure potentially improves performance of reference implementation
  // with a minor increase in complexity.
  //
  // Note, this reference implementation is NOT expected to approach peak performance.
  using OutputTile = MatrixShape<4, 4>;

  // Each thread computes a 4x4 output tile; the grid covers the full m x n output.
  dim3 block(16, 8);
  dim3 grid(
    (problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
    (problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn)
  );

  // Launch a GEMM kernel
  kernel::Gemm<
    TensorRef<ElementA, LayoutA>,
    TensorRef<ElementB, LayoutB>,
    TensorRef<ElementC, LayoutC>,
    ScalarType,
    AccumulatorType,
    OutputTile,
    InnerProductOp,
    ConvertOp
  ><<< grid, block >>>(
    problem_size,
    alpha,
    tensor_a,
    tensor_b,
    beta,
    tensor_c,
    tensor_d,
    initial_accum
  );
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
/// In-place convenience overload: computes D = alpha * A*B + beta * C with the
/// result written back into tensor_c. Forwards to the general form with
/// tensor_c passed as both the source C and the destination D.
template <
  typename ElementA, typename LayoutA,
  typename ElementB, typename LayoutB,
  typename ElementC, typename LayoutC,
  typename ScalarType, typename AccumulatorType,
  typename InnerProductOp = multiply_add<AccumulatorType>,
  typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
  gemm::GemmCoord problem_size,
  ScalarType alpha,
  TensorRef<ElementA, LayoutA> tensor_a,
  TensorRef<ElementB, LayoutB> tensor_b,
  ScalarType beta,
  TensorRef<ElementC, LayoutC> tensor_c,
  AccumulatorType initial_accum) {

  compute_gemm<
    ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
    ScalarType, AccumulatorType, InnerProductOp, ConvertOp
  >(problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c, initial_accum);
}
/// Functor wrapper around compute_gemm, specialized below on the InnerProductOp
/// tag (arch::OpMultiplyAdd, arch::OpMultiplyAddSaturate, arch::OpXorPopc) to
/// select the inner-product operation and output conversion. Primary template
/// is declared only; an unsupported tag is a compile-time error.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ScalarType,
  typename AccumulatorType,
  typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Gemm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
/// Partial specialization for multiply-add: standard GEMM using
/// multiply_add<AccumulatorType> as the inner product, with the default
/// NumericConverter for the output.
template <typename ElementA, typename LayoutA, typename ElementB,
          typename LayoutB, typename ElementC, typename LayoutC,
          typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
            ScalarType, AccumulatorType, arch::OpMultiplyAdd> {

  /// In-place form: result is written back into tensor_c.
  void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
                  TensorRef<ElementA, LayoutA> tensor_a,
                  TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
                  TensorRef<ElementC, LayoutC> tensor_c,
                  AccumulatorType initial_accum = AccumulatorType(0)) {
    static_assert(
      LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
      "Tensors must be of rank 2");

    compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
                 ScalarType, AccumulatorType, multiply_add<AccumulatorType>>(
      problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
  }

  /// Out-of-place form: result is written to tensor_d; tensor_c is read-only.
  void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
                  TensorRef<ElementA, LayoutA> tensor_a,
                  TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
                  TensorRef<ElementC, LayoutC> tensor_c,
                  TensorRef<ElementC, LayoutC> tensor_d,
                  AccumulatorType initial_accum = AccumulatorType(0)) {
    static_assert(
      LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
      "Tensors must be of rank 2");

    compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
                 ScalarType, AccumulatorType, multiply_add<AccumulatorType>>(
      problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
  }
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add-saturate
/// Partial specialization for multiply-add with saturation: identical math to
/// OpMultiplyAdd, but converts the output through NumericConverterClamp so the
/// accumulated result is clamped to the representable range of ElementC.
template <typename ElementA, typename LayoutA, typename ElementB,
          typename LayoutB, typename ElementC, typename LayoutC,
          typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
            AccumulatorType, arch::OpMultiplyAddSaturate> {

  /// In-place form: result is written back into tensor_c.
  void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
                  TensorRef<ElementA, LayoutA> tensor_a,
                  TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
                  TensorRef<ElementC, LayoutC> tensor_c,
                  AccumulatorType initial_accum = AccumulatorType(0)) {
    static_assert(
      LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
      "Tensors must be of rank 2");

    compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
                 ScalarType, AccumulatorType, multiply_add<AccumulatorType>,
                 NumericConverterClamp<ElementC, ScalarType>>(
      problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
  }

  /// Out-of-place form: result is written to tensor_d; tensor_c is read-only.
  void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
                  TensorRef<ElementA, LayoutA> tensor_a,
                  TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
                  TensorRef<ElementC, LayoutC> tensor_c,
                  TensorRef<ElementC, LayoutC> tensor_d,
                  AccumulatorType initial_accum = AccumulatorType(0)) {
    static_assert(
      LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
      "Tensors must be of rank 2");

    compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
                 ScalarType, AccumulatorType, multiply_add<AccumulatorType>,
                 NumericConverterClamp<ElementC, ScalarType>>(
      problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
  }
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for XOR-popc.
///
/// Uses xor_add<AccumulatorType> as the inner-product operator in place of
/// multiply_add (binary GEMM variant selected by arch::OpXorPopc).
template <typename ElementA, typename LayoutA, typename ElementB,
          typename LayoutB, typename ElementC, typename LayoutC,
          typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
            AccumulatorType, arch::OpXorPopc> {

  /// Computes the GEMM using tensor_c as the only C-side operand
  /// (no separate destination tensor).
  void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
                  TensorRef<ElementA, LayoutA> tensor_a,
                  TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
                  TensorRef<ElementC, LayoutC> tensor_c,
                  AccumulatorType initial_accum = AccumulatorType(0)) {
    static_assert(
        LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
        "Tensors must be of rank 2");

    // Forward to the generic implementation with the XOR-based inner product.
    compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
                 ScalarType, AccumulatorType, xor_add<AccumulatorType>>(
        problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
  }

  /// Computes the GEMM reading C from tensor_c and writing the result to the
  /// distinct destination tensor_d.
  void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
                  TensorRef<ElementA, LayoutA> tensor_a,
                  TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
                  TensorRef<ElementC, LayoutC> tensor_c,
                  TensorRef<ElementC, LayoutC> tensor_d,
                  AccumulatorType initial_accum = AccumulatorType(0)) {
    static_assert(
        LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
        "Tensors must be of rank 2");

    compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
                 ScalarType, AccumulatorType, xor_add<AccumulatorType>>(
        problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
  }
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Batched GEMM
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a batch of GEMMs over a set of matrices of common dimension.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
// The z grid dimension indexes the batch; each batch item computes an
// independent GEMM of the given problem_size on the corresponding tensors of
// the collections.
template <
  typename TensorRefCollectionA,
  typename TensorRefCollectionB,
  typename TensorRefCollectionC,
  typename ScalarType,
  typename AccumulatorType,
  typename InnerProductOp,
  typename ConvertOp
>
void BatchedGemm(
  gemm::GemmCoord problem_size,
  int batch_count,
  ScalarType alpha,
  TensorRefCollectionA const& tensor_a,
  TensorRefCollectionB const& tensor_b,
  ScalarType beta,
  TensorRefCollectionC &tensor_c,
  AccumulatorType initial_accum) {

  static_assert(
    TensorRefCollectionA::kRank == 2 &&
    TensorRefCollectionB::kRank == 2 &&
    TensorRefCollectionC::kRank == 2, "Tensors must be of rank 2");

  // Blocking structure potentially improves performance of reference implementation
  // with a minor increase in complexity.
  //
  // Note, this reference implementation is NOT expected to approach peak performance.
  using OutputTile = MatrixShape<4, 4>;

  // Each thread covers an OutputTile (4x4) region; the grid is the ceiling
  // division of the problem extent by the per-block coverage
  // (block dim * tile extent), with the batch index mapped to grid.z.
  dim3 block(16, 8);
  dim3 grid(
    (problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
    (problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn),
    batch_count
  );

  // Launch a GEMM kernel
  kernel::BatchedGemm<
    TensorRefCollectionA,
    TensorRefCollectionB,
    TensorRefCollectionC,
    ScalarType,
    AccumulatorType,
    OutputTile,
    InnerProductOp,
    ConvertOp
  ><<< grid, block >>>(
    problem_size,
    alpha,
    tensor_a,
    tensor_b,
    beta,
    tensor_c,
    initial_accum
  );
}
/// Convenience overload: computes a batch of GEMMs using the default
/// multiply-add inner product, a plain numeric conversion to the output
/// element type, and a zero initial accumulator.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
  typename TensorRefCollectionA,
  typename TensorRefCollectionB,
  typename TensorRefCollectionC,
  typename ScalarType,
  typename AccumulatorType
>
void BatchedGemm(
  gemm::GemmCoord problem_size,
  int batch_count,
  ScalarType alpha,
  TensorRefCollectionA const& tensor_a,
  TensorRefCollectionB const& tensor_b,
  ScalarType beta,
  TensorRefCollectionC &tensor_c) {

  // Forward to the primary overload. InnerProductOp and ConvertOp cannot be
  // deduced from the call arguments, so they are spelled out explicitly here.
  //
  // Fixes in this forwarding call relative to the previous version:
  //  - batch_count was omitted entirely, so the argument list did not match
  //    the primary overload's signature;
  //  - the initial accumulator was passed as ScalarType(0) where the primary
  //    overload expects an AccumulatorType.
  BatchedGemm<
    TensorRefCollectionA,
    TensorRefCollectionB,
    TensorRefCollectionC,
    ScalarType,
    AccumulatorType,
    multiply_add<AccumulatorType>,
    // NOTE(review): relies on the collection exposing an Element typedef, as
    // the TensorRefCollection concept provides -- confirm against the concept.
    NumericConverter<typename TensorRefCollectionC::Element, ScalarType>
  >(problem_size, batch_count, alpha, tensor_a, tensor_b, beta, tensor_c, AccumulatorType(0));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| cutlass/tools/util/include/cutlass/util/reference/device/gemm.h/0 | {
"file_path": "cutlass/tools/util/include/cutlass/util/reference/device/gemm.h",
"repo_id": "cutlass",
"token_count": 5124
} | 71 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <stdexcept>
#include "cutlass/cutlass.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines several helpers
namespace detail {
/// Recursive helper that walks one rank of a tensor's index space, delegating
/// the remaining (faster-varying) ranks to the next specialization.
template <typename Func, int Rank, int RankRemaining>
struct TensorForEachHelper {

  /// Index of the rank iterated at this recursion depth
  static int const kActiveRank = Rank - RankRemaining - 1;

  /// Sweeps the active rank, recursing for each coordinate value
  TensorForEachHelper(
    Func &func,
    Coord<Rank> const &extent,
    Coord<Rank> &coord) {

    for (int idx = 0; idx < extent.at(kActiveRank); ++idx) {
      coord[kActiveRank] = idx;
      TensorForEachHelper<Func, Rank, RankRemaining - 1>(func, extent, coord);
    }
  }
};
/// Terminal specialization: iterates the fastest-changing rank and invokes the
/// visitor functor once per fully-formed coordinate.
template <typename Func, int Rank>
struct TensorForEachHelper<Func, Rank, 0> {

  /// Index of the fastest-changing rank
  static int const kActiveRank = Rank - 1;

  /// Sweeps the last rank, applying the functor at every coordinate
  TensorForEachHelper(
    Func &func,
    Coord<Rank> const &extent,
    Coord<Rank> &coord) {

    for (int idx = 0; idx < extent.at(kActiveRank); ++idx) {
      coord[kActiveRank] = idx;
      func(coord);
    }
  }
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a functor object to every coordinate in a tensor's index space.
template <
  typename Func,                ///< function applied to each point in a tensor's index space
  int Rank>                     ///< rank of index space
void TensorForEach(Coord<Rank> extent, Func & func) {
  Coord<Rank> position;
  detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, extent, position);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a C++ lambda (taken by value) to every coordinate in a tensor's
/// index space.
template <
  typename Func,                ///< function applied to each point in a tensor's index space
  int Rank>                     ///< rank of index space
void TensorForEachLambda(Coord<Rank> extent, Func func) {
  Coord<Rank> position;
  detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, extent, position);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a linear block of `capacity` elements by repeatedly invoking a
/// generator functor constructed from the supplied parameters.
template <typename Element, typename Func>
struct BlockForEach {

  /// Constructor performs the operation.
  BlockForEach(
    Element *ptr,
    size_t capacity,
    typename Func::Params params = typename Func::Params()) {

    Func generator(params);

    Element *end = ptr + capacity;
    for (Element *it = ptr; it != end; ++it) {
      *it = generator();
    }
  }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/tools/util/include/cutlass/util/reference/host/tensor_foreach.h/0 | {
"file_path": "cutlass/tools/util/include/cutlass/util/reference/host/tensor_foreach.h",
"repo_id": "cutlass",
"token_count": 1327
} | 72 |
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Honor the legacy CUDNN_ENABLED switch by mapping it onto CUTLASS_ENABLE_CUDNN.
if(DEFINED CUDNN_ENABLED)
  set(CUTLASS_ENABLE_CUDNN ${CUDNN_ENABLED} CACHE BOOL "Enable CUTLASS to build with cuDNN library.")
endif()

# Nothing to do if cuDNN support was explicitly disabled.
if(DEFINED CUTLASS_ENABLE_CUDNN AND NOT CUTLASS_ENABLE_CUDNN)
  return()
endif()

message(STATUS "Configuring cuDNN ...")

# Locate the cuDNN header in the CUDA toolkit, CUDNN_PATH/CUDA_PATH
# (cache variables and environment variables), or the system include directory.
find_path(
  _CUDNN_INCLUDE_DIR cudnn.h
  PATHS
  ${CUDA_TOOLKIT_ROOT_DIR}/include
  $ENV{CUDNN_PATH}/include
  $ENV{CUDA_PATH}/include
  ${CUDNN_PATH}/include
  /usr/include)

# Locate the cuDNN library in the corresponding set of library directories.
find_library(
  _CUDNN_LIBRARY cudnn
  HINTS
  ${CUDA_TOOLKIT_ROOT_DIR}/lib64
  ${CUDA_TOOLKIT_ROOT_DIR}/lib/x64
  ${CUDA_TOOLKIT_ROOT_DIR}/lib
  $ENV{CUDNN_PATH}/lib64
  $ENV{CUDNN_PATH}/lib/x64
  $ENV{CUDNN_PATH}/lib
  $ENV{CUDA_PATH}/lib64
  $ENV{CUDA_PATH}/lib/x64
  $ENV{CUDA_PATH}/lib
  ${CUDNN_PATH}/lib64
  ${CUDNN_PATH}/lib/x64
  ${CUDNN_PATH}/lib
  /usr/lib/x86_64-linux-gnu
  /usr/lib)

# Record whether both the header and the library were found.
if(_CUDNN_INCLUDE_DIR AND _CUDNN_LIBRARY)

  message(STATUS "cuDNN: ${_CUDNN_LIBRARY}")
  message(STATUS "cuDNN: ${_CUDNN_INCLUDE_DIR}")

  set(CUDNN_FOUND ON CACHE INTERNAL "cuDNN Library Found")

else()

  message(STATUS "cuDNN not found.")
  set(CUDNN_FOUND OFF CACHE INTERNAL "cuDNN Library Found")

endif()

# Default CUTLASS_ENABLE_CUDNN to whether cuDNN was actually found
# (a user-provided cache value takes precedence).
set(CUTLASS_ENABLE_CUDNN ${CUDNN_FOUND} CACHE BOOL "Enable CUTLASS to build with cuDNN library.")

# Expose cuDNN as an imported target with the nvidia::cudnn alias so consumers
# can link against it directly.
if (CUTLASS_ENABLE_CUDNN AND NOT TARGET cudnn)

  set(CUDNN_INCLUDE_DIR ${_CUDNN_INCLUDE_DIR})
  set(CUDNN_LIBRARY ${_CUDNN_LIBRARY})

  # On Windows the import library is static; elsewhere cuDNN is a shared object.
  if(WIN32)
    add_library(cudnn STATIC IMPORTED GLOBAL)
  else()
    add_library(cudnn SHARED IMPORTED GLOBAL)
  endif()

  add_library(nvidia::cudnn ALIAS cudnn)

  set_property(
    TARGET cudnn
    PROPERTY IMPORTED_LOCATION
    ${CUDNN_LIBRARY})

  target_include_directories(
    cudnn
    INTERFACE
    $<INSTALL_INTERFACE:include>
    $<BUILD_INTERFACE:${CUDNN_INCLUDE_DIR}>)

endif()

# Fail loudly if the user demanded cuDNN support but it could not be located.
if(CUTLASS_ENABLE_CUDNN AND NOT CUDNN_FOUND)
  message(FATAL_ERROR "CUTLASS_ENABLE_CUDNN enabled but cuDNN library could not be found.")
endif()

message(STATUS "Configuring cuDNN ... done.")
| cutlass/cuDNN.cmake/0 | {
"file_path": "cutlass/cuDNN.cmake",
"repo_id": "cutlass",
"token_count": 1452
} | 0 |
// Doxygen-generated search index data: each entry maps a lowercase search key
// to its display name and the documentation page(s) it resolves to.
var searchData=
[
  ['operation',['Operation',['../classcutlass_1_1library_1_1Operation.html',1,'cutlass::library']]],
  ['operationdescription',['OperationDescription',['../structcutlass_1_1library_1_1OperationDescription.html',1,'cutlass::library']]],
  ['outputtileoptimalthreadmap',['OutputTileOptimalThreadMap',['../structcutlass_1_1epilogue_1_1threadblock_1_1OutputTileOptimalThreadMap.html',1,'cutlass::epilogue::threadblock']]],
  ['outputtileshape',['OutputTileShape',['../structcutlass_1_1epilogue_1_1threadblock_1_1OutputTileShape.html',1,'cutlass::epilogue::threadblock']]],
  ['outputtilethreadmap',['OutputTileThreadMap',['../structcutlass_1_1epilogue_1_1threadblock_1_1OutputTileThreadMap.html',1,'cutlass::epilogue::threadblock']]]
];
| cutlass/docs/search/classes_d.js/0 | {
"file_path": "cutlass/docs/search/classes_d.js",
"repo_id": "cutlass",
"token_count": 258
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions appear
namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is functional in host
and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables harware-accelerated
numeric conversion on x86-64 CPUs support F16C extensions. In device code, all available
hardware is used to implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported layouts. It simplifies
allocation and management of host- and device- memory allocations.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
This template function initializes elementsof a tensor to a random Gaussian distribution. It
uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template function computes the general matrix product. This template supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Instantiates a half-precision, column-major CUTLASS GEMM and launches it on
/// the given operands. Returns cudaSuccess on success, cudaErrorUnknown if the
/// CUTLASS operation reports any failure status.
cudaError_t cutlass_hgemm_nn(
  int M,
  int N,
  int K,
  cutlass::half_t alpha,
  cutlass::half_t const *A,
  cutlass::layout::ColumnMajor::Stride::Index lda,
  cutlass::half_t const *B,
  cutlass::layout::ColumnMajor::Stride::Index ldb,
  cutlass::half_t beta,
  cutlass::half_t *C,
  cutlass::layout::ColumnMajor::Stride::Index ldc) {

  // Define the GEMM operation: all operands are half precision, column major.
  using Gemm = cutlass::gemm::device::Gemm<
    cutlass::half_t,              // ElementA
    cutlass::layout::ColumnMajor, // LayoutA
    cutlass::half_t,              // ElementB
    cutlass::layout::ColumnMajor, // LayoutB
    cutlass::half_t,              // ElementOutput
    cutlass::layout::ColumnMajor  // LayoutOutput
  >;

  // Package the problem description. C serves as both the source operand
  // (scaled by beta) and the destination.
  Gemm::Arguments args(
    {M, N, K},
    {A, lda},
    {B, ldb},
    {C, ldc},
    {C, ldc},
    {alpha, beta});

  Gemm gemm_operator;
  cutlass::Status status = gemm_operator(args);

  // Collapse any CUTLASS failure into a generic CUDA error code.
  return (status == cutlass::Status::kSuccess) ? cudaSuccess : cudaErrorUnknown;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a half-precision
/// CUTLASS GEMM kernel, then verify it against a host-side reference GEMM.
/// Returns cudaSuccess only if the two results are bit-identical; on mismatch,
/// both results are written to a CSV file and cudaErrorUnknown is returned.
cudaError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
  cudaError_t result;

  //
  // Construct cutlass::HostTensor<> using the half-precision host-side type.
  //
  // cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2
  // tensors in column-major layout. Explicit synchronization methods are offered to copy the
  // tensor to the device or to the host.
  //

  // M-by-K matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));

  // K-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N));

  // M-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N));

  // M-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N));

  //
  // Initialize matrices with small, random integers.
  //

  // Arbitrary RNG seed value. Hard-coded for deterministic results.
  uint64_t seed = 2080;

  // Gaussian random distribution
  cutlass::half_t mean = 0.0_hf;
  cutlass::half_t stddev = 5.0_hf;

  // Specify the number of bits right of the binary decimal that are permitted
  // to be non-zero. A value of "0" here truncates random values to integers
  // (integer-valued data keeps the bit-exact comparison below meaningful).
  int bits_less_than_one = 0;

  // Each tensor gets a distinct seed so their contents are uncorrelated.
  cutlass::reference::device::TensorFillRandomGaussian(
    A.device_view(),
    seed,
    mean,
    stddev,
    bits_less_than_one
  );

  cutlass::reference::device::TensorFillRandomGaussian(
    B.device_view(),
    seed * 2019,
    mean,
    stddev,
    bits_less_than_one
  );

  cutlass::reference::device::TensorFillRandomGaussian(
    C_cutlass.device_view(),
    seed * 1993,
    mean,
    stddev,
    bits_less_than_one
  );

  // Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
  cutlass::device_memory::copy_device_to_device(
    C_reference.device_data(),
    C_cutlass.device_data(),
    C_cutlass.capacity());

  // Copy the device-side view into host memory
  C_reference.sync_host();

  //
  // Launch the CUTLASS GEMM kernel
  //

  result = cutlass_hgemm_nn(
    M,
    N,
    K,
    alpha,
    A.device_data(),
    A.stride(0),
    B.device_data(),
    B.stride(0),
    beta,
    C_cutlass.device_data(),
    C_cutlass.stride(0)
  );

  if (result != cudaSuccess) {
    return result;
  }

  //
  // Verify the result using a host-side reference
  //

  // A and B were initialized using device-side procedures. The intent of this example is to
  // use the host-side reference GEMM, so we must perform a device-to-host copy.
  A.sync_host();
  B.sync_host();

  // Copy CUTLASS's GEMM results into host memory.
  C_cutlass.sync_host();

  // Compute the reference result using the host-side GEMM reference implementation.
  cutlass::reference::host::Gemm<
    cutlass::half_t,              // ElementA
    cutlass::layout::ColumnMajor, // LayoutA
    cutlass::half_t,              // ElementB
    cutlass::layout::ColumnMajor, // LayoutB
    cutlass::half_t,              // ElementOutput
    cutlass::layout::ColumnMajor, // LayoutOutput
    cutlass::half_t,
    cutlass::half_t
  > gemm_ref;

  gemm_ref(
    {M, N, K},              // problem size (type: cutlass::gemm::GemmCoord)
    alpha,                  // alpha (type: cutlass::half_t)
    A.host_ref(),           // A (type: TensorRef<half_t, ColumnMajor>)
    B.host_ref(),           // B (type: TensorRef<half_t, ColumnMajor>)
    beta,                   // beta (type: cutlass::half_t)
    C_reference.host_ref()  // C (type: TensorRef<half_t, ColumnMajor>)
  );

  // Compare reference to computed results. Inputs are integer-valued, so the
  // two results are expected to match exactly.
  if (!cutlass::reference::host::TensorEquals(
    C_reference.host_view(),
    C_cutlass.host_view())) {

    char const *filename = "errors_01_cutlass_utilities.csv";

    std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl;

    //
    // On error, print C_cutlass and C_reference to std::cerr.
    //
    // Note, these are matrices of half-precision elements stored in host memory as
    // arrays of type cutlass::half_t.
    //

    std::ofstream file(filename);

    // Result of CUTLASS GEMM kernel
    file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;

    // Result of reference computation
    file << "\n\nReference =\n" << C_reference.host_view() << std::endl;

    // Return error code.
    return cudaErrorUnknown;
  }

  // Passed error check
  return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
//
// This example uses half-precision and is only suitable for devices with compute capabitliy 5.3 or greater.
//
cudaDeviceProp prop;
cudaError_t result = cudaGetDeviceProperties(&prop, 0);
if (result != cudaSuccess) {
std::cerr << "Failed to query device properties with error " << cudaGetErrorString(result) << std::endl;
return -1;
}
if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) {
std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n";
std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl;
return -1;
}
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values stored as
// cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero.
//
cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
//
// Run the CUTLASS GEMM test.
//
result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/01_cutlass_utilities/cutlass_utilities.cu/0 | {
"file_path": "cutlass/examples/01_cutlass_utilities/cutlass_utilities.cu",
"repo_id": "cutlass",
"token_count": 4609
} | 2 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of running grouped back-to-back GEMMs when intermediate results are RF resident
*/
#include <iostream>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/base_grouped.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "device/b2b_gemm.h"
#include "kernel/default_b2b_gemm.h"
#include "threadblock/grouped_threadblock_swizzle.h"
#include "b2b_grouped_gemm_run.h"
#include "test_run.h"
////////////////////////////////////////////////////////////////////////////////
// Problem sizes for the first and second GEMM of each group member
// (populated at runtime).
std::vector<cutlass::gemm::GemmCoord> gemm_f16_sm80_problem_sizes_0;
std::vector<cutlass::gemm::GemmCoord> gemm_f16_sm80_problem_sizes_1;

// Tile shapes for the two fused GEMMs.
//
// Constraints:
//   1. Warp shape N must equal thread block shape N
//   2. Problem size N must equal thread block shape N
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape0 = cutlass::gemm::GemmShape<16, 64, 32>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape1 = cutlass::gemm::GemmShape<16, 128, 32>;
// Command line options parsing
struct Options {

  bool help;
  bool error;
  bool reference_check;

  // Alignment (in elements) to which randomly chosen M/K dimensions are rounded
  int alignment = 8;

  // Per-group-member problem sizes for the first and second GEMM
  std::vector<cutlass::gemm::GemmCoord> problem_sizes0;
  std::vector<cutlass::gemm::GemmCoord> problem_sizes1;

  // Number of independent back-to-back GEMM problems in the group
  int problem_count;

  bool verbose;

  //
  // Methods
  //

  Options():
    help(false),
    error(false),
    reference_check(true),
    problem_count(15),
    verbose(false)
  { }

  // Parses the command line
  void parse(int argc, char const **args) {
    cutlass::CommandLine cmd(argc, args);

    if (cmd.check_cmd_line_flag("help")) {
      help = true;
      return;
    }

    cmd.get_cmd_line_argument("problems", problem_count, 15);
    cmd.get_cmd_line_argument("reference-check", reference_check, true);
    cmd.get_cmd_line_argument("verbose", verbose, false);

    randomize_problems(cmd);
  }

  /// Chooses the problem sizes for every group member. M and K of the first
  /// GEMM may be fixed via --m/--k; otherwise they are picked randomly as
  /// aligned multiples. The N extents are pinned to the threadblock shapes,
  /// as the fused kernel requires.
  void randomize_problems(cutlass::CommandLine &cmd) {

    //
    // For now, randomly choose the problem sizes.
    //

    int cmd_line_m = -1;
    int cmd_line_k = -1;
    cmd.get_cmd_line_argument("m", cmd_line_m);
    cmd.get_cmd_line_argument("k", cmd_line_k);

    problem_sizes0.reserve(problem_count);
    problem_sizes1.reserve(problem_count);

    for (int i = 0; i < problem_count; ++i) {
      int m = cmd_line_m;
      int k = cmd_line_k;

      // Negative (unset) dimensions are drawn randomly in [alignment, 256*alignment].
      if (m < 1) {
        m = alignment * ((rand() % 256) + 1);
      }

      if (k < 1) {
        k = alignment * ((rand() % 256) + 1);
      }

      cutlass::gemm::GemmCoord problem0(m, ThreadblockShape0::kN, k);

      // GEMM1 consumes GEMM0's output, so its K equals GEMM0's N.
      cutlass::gemm::GemmCoord problem1(m, ThreadblockShape1::kN, ThreadblockShape0::kN);

      problem_sizes0.push_back(problem0);
      problem_sizes1.push_back(problem1);
    }

    if (verbose) {
      print_problem_sizes();
    }
  }

  /// Prints the usage statement.
  //
  // Fixed in this revision: "indivdual" typo, the truncated "subject to.s"
  // fragment (missing paragraph break before "Options:"), and the example
  // which advertised a nonexistent --groups flag (the parsed flag is
  // --problems).
  std::ostream & print_usage(std::ostream &out) const {

    out << "13_fused_two_gemms_grouped_f16_sm80_rf\n\n"
        << "  This example runs a grouped back-to-back GEMM kernel. A group of independent back-to-back GEMMs are\n"
        << "  run in a single kernel. Each individual problem in the group is subject to the same constraints that non-grouped\n"
        << "  back-to-back GEMMs are subject to.\n\n"
        << "Options:\n\n"
        << "  --help                      If specified, displays this usage statement.\n\n"
        << "  --problems=<int>            Number of individual GEMM problems (default: --problems=15)\n"
        << "  --m=<int>                   Sets the M dimension of both GEMMs for all groups. Otherwise, it is selected randomly\n"
        << "  --k=<int>                   Sets the K dimension of the first GEMM for all groups. Otherwise, it is selected randomly\n"
        << "  --verbose=<bool>            If true, prints problem sizes.\n";

    out << "\n\nExamples:\n\n"
        << "# Runs a grouped B2b GEMM with 10 random problem sizes\n"
        << "$ ./examples/13_two_tensor_op_fusion/13_fused_two_gemms_grouped_f16_sm80_rf --problems=10\n\n";

    return out;
  }

  /// Prints the chosen problem sizes, one line per group member.
  void print_problem_sizes() {
    std::cout << std::endl;
    std::cout << "Executing " << problem_count << " independent back-to-back GEMMs in a group" << std::endl;
    for (int i = 0; i < problem_count; ++i) {
      cutlass::gemm::GemmCoord problem0 = problem_sizes0.at(i);
      cutlass::gemm::GemmCoord problem1 = problem_sizes1.at(i);
      std::cout << "Problem " << i
                << "\t\tGEMM0: " << problem0.m() << 'x' << problem0.n() << 'x' << problem0.k()
                << "\t\tGEMM1: " << problem1.m() << 'x' << problem1.n() << 'x' << problem1.k()
                << std::endl;
    }
  }
};
/// Configures and runs the grouped back-to-back FP16 GEMM kernel with the
/// intermediate accumulator held in the register file (RF residency).
/// Returns true if the fused result matches the reference check.
bool run_fused_grouped_gemm_f16_sm80_rf_res() {

  using ElementOutput = cutlass::half_t;
  using ElementAccumulator = cutlass::half_t;
  using ElementCompute = cutlass::half_t;

  ElementCompute alpha0 = ElementCompute(1);
  //Fused kernel has built-in bias, setting beta=0
  ElementCompute beta0 = ElementCompute(0);
  ElementCompute alpha1 = ElementCompute(1);
  ElementCompute beta1 = ElementCompute(1); //beta=1 for bias

  // Tensor-core MMA instruction shape for SM80 (Ampere).
  using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;

  // Epilogue of the first GEMM: ReLU with alpha scaling only. Its vector
  // width is kM * kN / 32 — the per-thread share of one warp-wide MMA tile —
  // since the intermediate result stays in registers rather than memory.
  using EpilogueOutputOp0 =
    cutlass::epilogue::thread::LinearCombinationRelu<
      ElementOutput,
      InstructionShape::kM * InstructionShape::kN / 32,
      ElementAccumulator,
      ElementCompute,
      cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
    >;

  // Epilogue of the second GEMM: ReLU without beta scaling (bias is added via
  // the C operand with beta fixed to 1). Vector width is a full 128-bit access.
  using EpilogueOutputOp1 =
    cutlass::epilogue::thread::LinearCombinationRelu<
      ElementOutput,
      128 / cutlass::sizeof_bits<ElementOutput>::value,
      ElementAccumulator,
      ElementCompute,
      cutlass::epilogue::thread::ScaleType::NoBetaScaling
    >;

  // Swizzle that maps threadblocks onto the grouped problems.
  using GroupedThreadblockSwizzle = cutlass::gemm::threadblock::B2bGemmGroupedThreadblockSwizzle<
      ThreadblockShape0,
      cutlass::layout::RowMajor // LayoutC
    >;

  // 128-bit aligned accesses for half_t operands; 3-stage software pipeline.
  const int kAlignment = 128 / cutlass::sizeof_bits<ElementOutput>::value;
  const int kStages = 3;

  // Assemble the fused B2b GEMM kernel: A row-major, B column-major (TN),
  // C/D row-major, tensor-op math on SM80.
  using B2bGemmKernel = cutlass::gemm::kernel::DefaultB2bGemm<
    cutlass::half_t,
    cutlass::layout::RowMajor,
    kAlignment,
    cutlass::half_t,
    cutlass::layout::ColumnMajor,
    kAlignment,
    cutlass::half_t,
    cutlass::layout::RowMajor,
    ElementAccumulator,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    ThreadblockShape0,
    ThreadblockShape1,
    WarpShape0,
    WarpShape1,
    InstructionShape,
    EpilogueOutputOp0,
    EpilogueOutputOp1,
    GroupedThreadblockSwizzle,
    kStages,
    cutlass::arch::OpMultiplyAdd
  >::B2bGemmKernel;

  using B2bGemm = cutlass::gemm::device::BaseGrouped<B2bGemmKernel>;

  // Run the fused kernel against the problem sizes published by main().
  B2bFusedGroupedGemmRun<B2bGemm> fusedGemm;

  std::cout << "Running Fused back-to-back FP16 TN Grouped GEMMs with RF residency...\n";
  bool passed = fusedGemm.run(gemm_f16_sm80_problem_sizes_0, gemm_f16_sm80_problem_sizes_1, alpha0, beta0, alpha1, beta1);
  if(passed)
    std::cout << "Pass\n";
  else
    std::cout << "Fail\n";

  return passed;
}
int main(int argc, char const **args) {
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
gemm_f16_sm80_problem_sizes_0 = options.problem_sizes0;
gemm_f16_sm80_problem_sizes_1 = options.problem_sizes1;
std::vector<bool (*)()>funcs = {
&run_fused_grouped_gemm_f16_sm80_rf_res
};
return testRun(80, funcs, "grouped gemm f16 RF residency");
}
////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/13_two_tensor_op_fusion/fused_two_gemms_grouped_f16_sm80_rf.cu/0 | {
"file_path": "cutlass/examples/13_two_tensor_op_fusion/fused_two_gemms_grouped_f16_sm80_rf.cu",
"repo_id": "cutlass",
"token_count": 4015
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines device-side elementwise operations on TensorView. Note, the operations defined
in this header are not specialized for any particular data layout and are therefore not
intended to offer the best possible performance. Rather, they are intended to be generic
reference implementations to support the CUTLASS unit tests.
*/
#pragma once
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
template <
  typename TensorRefIn,                   ///< Input TensorRef Type
  typename TensorRefOut,                  ///< Output TensorRef Type
  typename ScalarType,                    ///< alpha Type
  typename TensorRefScalar,               ///< Scale/Bias TensorRef Type
  typename OutputTile,
  typename ConvertOp = NumericConverter<typename TensorRefOut::Element, ScalarType>
>
__global__ void TensorScaleBiasGemm(
  gemm::GemmCoord problem_size,
  TensorRefIn tensor_in,                  ///< input tensor
  TensorRefOut tensor_out,                ///< output tensor
  ScalarType alpha,                       ///< alpha
  TensorRefScalar tensor_scale,           ///< scale tensor
  TensorRefScalar tensor_bias             ///< bias tensor
) {

  ConvertOp convert_op;

  // Upper-left (row, column) of this thread's OutputTile-sized output tile.
  MatrixCoord output_coord(
    MatrixCoord::Index((threadIdx.x + blockIdx.x * blockDim.x) * OutputTile::kRow),
    MatrixCoord::Index((threadIdx.y + blockIdx.y * blockDim.y) * OutputTile::kColumn)
  );

  // Update the output tensor
  for (int j = 0; j < OutputTile::kRow; ++j) {
    for (int i = 0; i < OutputTile::kColumn; ++i) {

      // j walks rows and i walks columns, so the tile-local offset must be
      // MatrixCoord(row = j, column = i). The previous MatrixCoord(i, j)
      // transposed the offsets and was only correct for square OutputTiles.
      MatrixCoord coord = output_coord + MatrixCoord(j, i);

      if (coord.row() < problem_size.m() && coord.column() < problem_size.n()) {

        // Per-column scale overrides alpha when a scale vector is supplied.
        ScalarType scale = alpha;
        if(tensor_scale.good())
          scale = tensor_scale.at({0, coord.column()});

        // Per-column bias defaults to zero when no bias vector is supplied.
        ScalarType bias = ScalarType(0);
        if(tensor_bias.good())
          bias = tensor_bias.at({0, coord.column()});

        tensor_out.at(coord) = convert_op(
          scale * ScalarType(tensor_in.at(coord)) + bias);
      }
    }
  }
}
template <
  typename TensorRefIn,                   ///< Input TensorRef Type
  typename TensorRefOut,                  ///< Output TensorRef Type
  typename ScalarType,                    ///< alpha Type
  typename TensorRefScalar,               ///< Scale/Bias TensorRef Type
  typename ConvertOp = NumericConverter<typename TensorRefOut::Element, ScalarType>,
  int kMblock = 4,
  int kNblock = 4
>
__global__ void TensorScaleBiasGemmBatched(
  gemm::GemmCoord problem_size,
  TensorRefIn tensor_in,                  ///< input tensor
  TensorRefOut tensor_out,                ///< output tensor
  ScalarType alpha,                       ///< alpha
  TensorRefScalar tensor_scale,           ///< scale tensor
  TensorRefScalar tensor_bias,            ///< bias tensor
  int batch_count = 1,
  int64_t batch_stride_tensor_in = 0,
  int64_t batch_stride_tensor_out = 0,
  int64_t batch_stride_tensor_scale = 0,
  int64_t batch_stride_tensor_bias = 0
) {

  ConvertOp convert_op;

  // Each thread owns a kMblock x kNblock sub-tile of each batch's output.
  int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock;
  int col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock;

  // blockIdx.z selects the starting batch; each z-slice first advances its
  // tensor references to that batch's data ...
  int batch_idx = blockIdx.z;

  tensor_in.add_pointer_offset(batch_idx * batch_stride_tensor_in);
  tensor_out.add_pointer_offset(batch_idx * batch_stride_tensor_out);
  tensor_scale.add_pointer_offset(batch_idx * batch_stride_tensor_scale);
  tensor_bias.add_pointer_offset(batch_idx * batch_stride_tensor_bias);

  // ... then advances by gridDim.z batches per iteration, so any batch_count
  // can be covered by a grid whose z extent is smaller than batch_count.
  for (; batch_idx < batch_count; batch_idx += gridDim.z) {

    CUTLASS_PRAGMA_UNROLL
    for (int j = 0; j < kNblock; j++) {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kMblock; i++) {
        int row = row_block + i;
        int col = col_block + j;
        MatrixCoord coord = MatrixCoord(row, col);

        if (coord.row() < problem_size.m() && coord.column() < problem_size.n()) {

          // Per-column scale overrides alpha when a scale vector is supplied.
          ScalarType scale = alpha;
          if(tensor_scale.good())
            scale = tensor_scale.at({0, coord.column()});

          // Per-column bias defaults to zero when no bias vector is supplied.
          ScalarType bias = ScalarType(0);
          if(tensor_bias.good())
            bias = tensor_bias.at({0, coord.column()});

          tensor_out.at(coord) = convert_op(
            scale * ScalarType(tensor_in.at(coord)) + bias);
        }
      }
    }

    // Step all references forward to this z-slice's next batch.
    tensor_in.add_pointer_offset(batch_stride_tensor_in * gridDim.z);
    tensor_out.add_pointer_offset(batch_stride_tensor_out * gridDim.z);
    tensor_scale.add_pointer_offset(batch_stride_tensor_scale * gridDim.z);
    tensor_bias.add_pointer_offset(batch_stride_tensor_bias * gridDim.z);
  }
}
template <
  typename TensorRefIn,                   ///< Input TensorRef Type
  typename TensorRefOut,                  ///< Output TensorRef Type
  typename ScalarType,                    ///< alpha Type
  typename TensorRefScalar,               ///< Scale/Bias TensorRef Type
  typename ConvertOp = NumericConverter<typename TensorRefOut::Element, ScalarType>,
  int kThreadM = 4,       // shape of a thread's tile in the GEMM M dimension
  int kThreadN = 4,       // shape of a thread's tile in the GEMM N dimension
  int kCtaShapeM = 16,    // shape of a threadblock in units of threads
  int kCtaShapeN = 8      // shape of a threadblock in units of threads
>
__global__ void TensorScaleBiasConv2d(
  conv::Conv2dProblemSize problem_size,
  TensorRefIn tensor_in,                  ///< input tensor
  TensorRefOut tensor_out,                ///< output tensor
  ScalarType alpha,                       ///< alpha
  TensorRefScalar tensor_scale,           ///< scale tensor
  TensorRefScalar tensor_bias             ///< bias tensor
) {

  ConvertOp convert_op;

  // Treat the NHWC activation as an implicit-GEMM output: rows are linearized
  // (n, p, q) positions, columns are the K output channels.
  int64_t npq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
  int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;

  int thread_n[kThreadM];
  int thread_p[kThreadM];
  int thread_q[kThreadM];

  // Compute N, P, Q coordinates for each row of a thread's tile
  int64_t PQ = int64_t(problem_size.P) * problem_size.Q;

  CUTLASS_PRAGMA_UNROLL
  for (int m = 0; m < kThreadM; ++m) {

    // Decompose the linear npq index into (n, p, q).
    int64_t npq = npq_start + m;

    thread_n[m] = int(npq / PQ);

    int64_t residual = npq % PQ;
    thread_p[m] = int(residual / problem_size.Q);
    thread_q[m] = int(residual % problem_size.Q);
  }

  // Write out the results
  CUTLASS_PRAGMA_UNROLL
  for (int m = 0; m < kThreadM; ++m) {

    // Guard rows against the tail of the linearized NPQ extent.
    if (thread_n[m] < problem_size.N && thread_p[m] < problem_size.P && thread_q[m] < problem_size.Q) {

      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < kThreadN; ++n) {

        int thread_k = k_start + n;

        if (thread_k < problem_size.K) {

          // Per-channel scale overrides alpha when a scale vector is supplied.
          ScalarType scale = alpha;
          if(tensor_scale.good())
            scale = tensor_scale.at({0, thread_k});

          // Per-channel bias defaults to zero when no bias vector is supplied.
          ScalarType bias = ScalarType(0);
          if(tensor_bias.good())
            bias = tensor_bias.at({0, thread_k});

          tensor_out.at({thread_n[m], thread_p[m], thread_q[m], thread_k}) = convert_op(
            scale * ScalarType(
              tensor_in.at({thread_n[m], thread_p[m], thread_q[m], thread_k})
            ) + bias);
        }
      }
    }
  }
}
}
/// Apply scale and bias on a tensor
/// Apply scale and bias on a tensor
template <
  typename ElementIn,                     ///< Input Type
  typename ElementOut,                    ///< Output Type
  typename Layout,                        ///< Layout of input/output tensor
  typename ScalarType,                    ///< alpha Type
  typename LayoutScaleBias,               ///< Layout of scale and bias
  typename ConvertOp = NumericConverter<ElementOut, ScalarType>
>
void TensorScaleBiasGemm(
  gemm::GemmCoord problem_size,
  TensorRef<ElementIn, Layout> tensor_in,                       ///< input tensor
  TensorRef<ElementOut, Layout> tensor_out,                     ///< output tensor
  ScalarType alpha,                                             ///< alpha
  TensorRef<ScalarType, LayoutScaleBias> tensor_scale,          ///< scale tensor
  TensorRef<ScalarType, LayoutScaleBias> tensor_bias            ///< bias tensor
) {

  // Each thread computes a 4x4 tile of the output.
  using OutputTile = MatrixShape<4, 4>;

  dim3 threads_per_block(16, 8);

  // Extent of output covered by one threadblock, per dimension.
  unsigned cta_rows = threads_per_block.x * OutputTile::kRow;
  unsigned cta_cols = threads_per_block.y * OutputTile::kColumn;

  // Ceiling-divide the problem extent by the per-CTA coverage.
  dim3 grid_dims(
    (problem_size.m() + cta_rows - 1) / cta_rows,
    (problem_size.n() + cta_cols - 1) / cta_cols
  );

  kernel::TensorScaleBiasGemm<
    TensorRef<ElementIn, Layout>,
    TensorRef<ElementOut, Layout>,
    ScalarType,
    TensorRef<ScalarType, LayoutScaleBias>,
    OutputTile,
    ConvertOp
  ><<< grid_dims, threads_per_block >>> (
    problem_size,
    tensor_in,
    tensor_out,
    alpha,
    tensor_scale,
    tensor_bias
  );
}
/// Apply scale and bias on a tensor
/// Apply scale and bias on a tensor
template <
  typename ElementIn,                     ///< Input Type
  typename ElementOut,                    ///< Output Type
  typename Layout,                        ///< Layout of input/output tensor
  typename ScalarType,                    ///< alpha Type
  typename LayoutScaleBias,               ///< Layout of scale and bias
  typename ConvertOp = NumericConverter<ElementOut, ScalarType>
>
void TensorScaleBiasGemmBatched(
  gemm::GemmCoord problem_size,
  TensorRef<ElementIn, Layout> tensor_in,                       ///< input tensor
  TensorRef<ElementOut, Layout> tensor_out,                     ///< output tensor
  ScalarType alpha,                                             ///< alpha
  TensorRef<ScalarType, LayoutScaleBias> tensor_scale,          ///< scale tensor
  TensorRef<ScalarType, LayoutScaleBias> tensor_bias,           ///< bias tensor
  int batch_count = 1,
  int64_t batch_stride_tensor_in = 0,
  int64_t batch_stride_tensor_out = 0,
  int64_t batch_stride_tensor_scale = 0,
  int64_t batch_stride_tensor_bias = 0
) {

  int const kMblock = 4;
  int const kNblock = 4;

  dim3 block(16, 8);

  // CUDA limits gridDim.z to 65535. Clamp rather than take the modulus: the
  // kernel strides over batches by gridDim.z, so a clamped launch still covers
  // every batch, whereas `batch_count % 65535` produced a zero-sized grid
  // (invalid launch) whenever batch_count was an exact multiple of 65535.
  int const kMaxGridDimZ = 65535;

  dim3 grid(
    (problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock),
    (problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock),
    (batch_count < kMaxGridDimZ ? batch_count : kMaxGridDimZ)
  );

  kernel::TensorScaleBiasGemmBatched<
    TensorRef<ElementIn, Layout>,
    TensorRef<ElementOut, Layout>,
    ScalarType,
    TensorRef<ScalarType, LayoutScaleBias>,
    ConvertOp,
    kMblock,
    kNblock
  ><<< grid, block >>> (
    problem_size,
    tensor_in,
    tensor_out,
    alpha,
    tensor_scale,
    tensor_bias,
    batch_count,
    batch_stride_tensor_in,
    batch_stride_tensor_out,
    batch_stride_tensor_scale,
    batch_stride_tensor_bias
  );
}
/// Apply scale and bias on a tensor
/// Apply scale and bias on a tensor
template <
  typename ElementIn,                     ///< Input Type
  typename ElementOut,                    ///< Output Type
  typename Layout,                        ///< Layout of input/output tensor
  typename ScalarType,                    ///< alpha Type
  typename LayoutScaleBias,               ///< Layout of scale and bias
  typename ConvertOp = NumericConverter<ElementOut, ScalarType>
>
void TensorScaleBiasConv2d(
  conv::Conv2dProblemSize problem_size,
  TensorRef<ElementIn, Layout> tensor_in,                       ///< input tensor
  TensorRef<ElementOut, Layout> tensor_out,                     ///< output tensor
  ScalarType alpha,                                             ///< alpha
  TensorRef<ScalarType, LayoutScaleBias> tensor_scale,          ///< scale tensor
  TensorRef<ScalarType, LayoutScaleBias> tensor_bias            ///< bias tensor
) {

  int const kThreadM = 4;     // shape of a thread's tile in the GEMM M dimension
  int const kThreadN = 4;     // shape of a thread's tile in the GEMM N dimension
  int const kCtaShapeM = 16;  // shape of a threadblock in units of threads
  int const kCtaShapeN = 8;   // shape of a threadblock in units of threads

  // Rows of the implicit GEMM are the linearized (N, P, Q) output positions;
  // columns are the K output channels.
  int64_t npq = int64_t(problem_size.N) * problem_size.P * problem_size.Q;

  // Output extent covered by one threadblock in each grid dimension.
  int64_t rows_per_cta = int64_t(kCtaShapeM) * kThreadM;
  int cols_per_cta = kCtaShapeN * kThreadN;

  int64_t grid_m = (npq + rows_per_cta - 1) / rows_per_cta;
  int grid_n = (problem_size.K + cols_per_cta - 1) / cols_per_cta;

  dim3 block(kCtaShapeM, kCtaShapeN);
  dim3 grid(uint32_t(grid_m), grid_n);

  kernel::TensorScaleBiasConv2d<
    TensorRef<ElementIn, Layout>,
    TensorRef<ElementOut, Layout>,
    ScalarType,
    TensorRef<ScalarType, LayoutScaleBias>,
    ConvertOp,
    kThreadM,
    kThreadN,
    kCtaShapeM,
    kCtaShapeN
  ><<< grid, block >>> (
    problem_size,
    tensor_in,
    tensor_out,
    alpha,
    tensor_scale,
    tensor_bias
  );
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| cutlass/examples/13_two_tensor_op_fusion/reference/device/tensor_scale_bias.h/0 | {
"file_path": "cutlass/examples/13_two_tensor_op_fusion/reference/device/tensor_scale_bias.h",
"repo_id": "cutlass",
"token_count": 6022
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
// This example fuses gather before GEMM and scatter after GEMM into the same
// GEMM kernel. The gather and scatter operations are controlled by an index vector
// to select rows or columns from A, B, C or D matrices.
//
// Suppose, all matrices are column major. The pseudo code of the fused kernel
// in this example is essentially
//
// for (int i = 0; i < problem_size.m(); ++i) {
// for (int j = 0; j < options.index_size; ++j) {
// int b_c_d_col = tensor_indices.at({j, 0});
//
//     for (int k = 0; k < problem_size.k(); ++k) {
// tensor_d_ref.at({i, b_c_d_col}) +=
// alpha * tensor_a.at({i, k}) * tensor_b.at({k, b_c_d_col});
// }
// }
//
// Note that the index vector contains unique random integers with max to be N - 1
//
// The gather/scatter operation works best when we can still keep the biggest
// alignment. For example, when the matrix is row major, we select rows. When
// the matrix is column major, we select columns.
//
// Not all the combination of gather and scatter are legal. For example, if A is
// row major and C/D is column major, we cannot gather A and scatter C/D at the
// same time.
//
// Also, we don't check that the index values are legal or that the index array pointer is valid,
// for the sake of the performance.
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <random>
#include <numeric>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {

  double runtime_ms;        // average kernel runtime in milliseconds
  double gflops;            // achieved throughput in GFLOP/s
  cutlass::Status status;   // status returned by the CUTLASS kernel
  cudaError_t error;        // last CUDA runtime error observed, if any
  bool passed;              // true if the reference check succeeded

  //
  // Methods
  //

  // Constructs a Result that defaults to a successful, passing state.
  Result(
    double runtime_ms = 0,
    double gflops = 0,
    cutlass::Status status = cutlass::Status::kSuccess,
    cudaError_t error = cudaSuccess
  ):
    runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {

  bool help;                              // print usage and exit
  cutlass::gemm::GemmCoord problem_size;  // full GEMM extents (M, N, K)
  int index_size;                         // number of gathered/scattered columns (real N)
  bool reference_check;                   // run the host reference comparison
  int iterations;                         // profiling iterations

  // Defaults: 248x1024x1024 problem, 240 gathered columns, 20 iterations.
  Options():
    help(false),
    problem_size({248, 1024, 1024}),
    index_size(240),
    reference_check(true),
    iterations(20) { }

  // Placeholder for option validation; all parsed values are accepted.
  bool valid() {
    return true;
  }

  // Parses the command line
  void parse(int argc, char const **args) {
    cutlass::CommandLine cmd(argc, args);

    if (cmd.check_cmd_line_flag("help")) {
      help = true;
    }

    cmd.get_cmd_line_argument("m", problem_size.m());
    cmd.get_cmd_line_argument("n", problem_size.n());
    cmd.get_cmd_line_argument("k", problem_size.k());
    cmd.get_cmd_line_argument("index_size", index_size);
    cmd.get_cmd_line_argument("iterations", iterations);
  }

  /// Prints the usage statement.
  std::ostream & print_usage(std::ostream &out) const {

    out << "36_gather_scatter_fusion example\n\n"
      << " This example uses the CUTLASS Library to fuse gather/scatter into GEMM\n\n"
      << "Options:\n\n"
      << " --help If specified, displays this usage statement.\n\n"
      << " --m=<int> GEMM M dimension\n"
      << " --n=<int> GEMM N dimension\n"
      << " --k=<int> GEMM K dimension\n"
      << " --index_size=<int> size of N dimension index\n"
      << " --iterations=<int> Number of profiling iterations to perform.\n\n";

    out << "\n\nExamples:\n\n"
      << "$ ./examples/36_gather_scatter_fusion/36_gather_scatter_fusion --m=1024 --n=512 --k=1024 \\\n"
      << " --index_size=128\n\n";

    return out;
  }

  /// Compute performance in GFLOP/s
  double gflops(double runtime_s) const {

    // Number of real-valued multiply-adds: only index_size columns of the
    // output are actually computed, so N is replaced by index_size.
    int64_t fmas = problem_size.m() * int64_t(index_size) * problem_size.k();

    // Two flops per multiply-add
    return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
  }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float;                   // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator;  // <- data type of epilogue operations
using ElementInputA = cutlass::half_t;              // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t;              // <- data type of elements in input matrix B
using ElementOutput = float;                        // <- data type of elements in output matrix D

// The code section below describes matrix layout of input and output matrices.
// Column Major for Matrix A, B and C.
//
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::ColumnMajor;

// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;

// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;

// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
    cutlass::gemm::GemmShape<128, 128, 32>;  // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>;  // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 16>;  // <- MMA Op tile M = 16, N = 8, K = 16
                                                         // 16, 8, 8 -> Turing
                                                         // 16, 8, 16 -> Ampere

// This code section describes how threadblocks are scheduled on GPU
// (identity swizzle: threadblocks map directly onto output tiles)
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;

// Define the epilogue operation as LinearCombination. This is approximately equal to
//
//    d_ij = alpha * sum_k(a_ik * b_kj) + c_ij
//
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
    ElementOutput,                                     // <- data type of output matrix
    128 / cutlass::sizeof_bits<ElementOutput>::value,  // <- this is the number of elements per
                                                       // vectorized memory access. For float
                                                       // output, it's 4 elements. This becomes
                                                       // the vector width of math instructions in
                                                       // epilogue too
    ElementAccumulator,                                // <- data type of accumulator
    ElementComputeEpilogue>;  // <- data type for alpha in linear combination function

// Number of pipelines you want to use
constexpr int NumStages = 5;
// Ampere -> 4/5
// Turing -> 2

// The GemmUniversal template with GatherB and ScatterD enabled: B's columns
// are gathered and D's columns are scattered through the index vector.
using Gemm = cutlass::gemm::device::GemmUniversal<ElementInputA,
                                                  LayoutInputA,
                                                  ElementInputB,
                                                  LayoutInputB,
                                                  ElementOutput,
                                                  LayoutOutput,
                                                  ElementAccumulator,
                                                  MMAOp,
                                                  SmArch,
                                                  ShapeMMAThreadBlock,
                                                  ShapeMMAWarp,
                                                  ShapeMMAOp,
                                                  EpilogueOp,
                                                  SwizzleThreadBlock,
                                                  NumStages,
                                                  8, /*alignmentA*/
                                                  8, /*alignmentB*/
                                                  cutlass::arch::OpMultiplyAdd,
                                                  cutlass::ComplexTransform::kNone,
                                                  cutlass::ComplexTransform::kNone,
                                                  false, /*GatherA*/
                                                  true,  /*GatherB*/
                                                  true   /*ScatterD*/
                                                  >;
/// Runs the fused gather/GEMM/scatter kernel, optionally verifies it against a
/// host reference, and profiles the average runtime over options.iterations
/// launches. Returns 0 on success, -1 on any failure.
int run(Options &options) {

  // ================================================================================
  // Initialization setup

  // Create a tuple of problem size for matrix multiplication
  cutlass::gemm::GemmCoord problem_size = options.problem_size;

  // The "real" problem replaces N by index_size: only the gathered columns of
  // B (and scattered columns of D) are computed.
  cutlass::gemm::GemmCoord problem_size_real(problem_size.m(),
                                             options.index_size,
                                             problem_size.k());

  // Initialize tensors using CUTLASS helper functions
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
      problem_size.mk());  // <- Create matrix A with dimensions M x K
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
      problem_size.kn());  // <- Create matrix B with dimensions K x N
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
      problem_size.mn());  // <- Create matrix C with dimensions M x N
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d_scattered(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           // CUTLASS kernel

  // Fill input and output matrices on host using CUTLASS helper functions
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_a.host_view(),
      1,
      ElementInputA(7),
      ElementInputA(-8),
      0);  // <- Fill matrix A on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b.host_view(),
      1,
      ElementInputB(7),   // <- fixed: previously used ElementInputA for B's bounds
      ElementInputB(-8),
      0);  // <- Fill matrix B on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_c.host_view(),
      1,
      ElementOutput(7),
      ElementOutput(-8),
      0);  // <- Fill matrix C on host with uniform-distribution random data
  cutlass::reference::host::TensorFill(
      tensor_d_scattered.host_view());  // <- fill matrix D on host with zeros

  cutlass::HostTensor<int, LayoutOutput> tensor_indices(
      {options.index_size, 1});  // <- Create scatter indices with dimensions val_len x 1

  // <- Fill tensor_b_indices on host with unique random integers
  std::vector<int> to_fill(problem_size.n()) ; // vector with ints.
  std::iota (std::begin(to_fill), std::end(to_fill), 0); // Fill with 0, 1, ...., problem_size.n()
  { // std::random_shuffle was deprecated in C++14 and removed in C++17
    std::random_device make_seed;
    std::mt19937 source_of_randomness(make_seed());
    std::shuffle(to_fill.begin(), to_fill.end(), source_of_randomness);
  }
  // Keep only the first index_size shuffled indices.
  memcpy(tensor_indices.host_data(), to_fill.data(), options.index_size * sizeof(int));

  // Copy data from host to GPU
  tensor_a.sync_device();
  tensor_b.sync_device();
  tensor_indices.sync_device();
  tensor_c.sync_device();
  tensor_d_scattered.sync_device();

  // Initialize alpha/beta for dot product computation
  ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
  ElementComputeEpilogue beta = ElementComputeEpilogue(1);

  // Split K dimension into 1 partitions
  int split_k_slices = 1;

  // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
  // instantiated CUTLASS kernel
  // NOTE(review): the batch-stride extent for B below looks transposed for a
  // K x N column-major matrix; it is unused in kGemm mode — verify before
  // enabling batched modes.
  typename Gemm::Arguments arguments{
      cutlass::gemm::GemmUniversalMode::kGemm,
      problem_size_real,                  // <- problem size of matrix multiplication
      split_k_slices,                     // <- k-dimension split factor
      {alpha, beta},                      // <- alpha, beta
      tensor_a.device_data(),             // <- reference to matrix A on device
      tensor_b.device_data(),             // <- reference to matrix B on device
      tensor_c.device_data(),             // <- reference to matrix C on device
      tensor_d_scattered.device_data(),   // <- reference to matrix D on device
      tensor_a.layout().capacity(problem_size.mk()),
      tensor_b.layout().capacity(cutlass::make_Coord(options.index_size, problem_size.k())),
      tensor_c.layout().capacity(problem_size.mn()),
      tensor_d_scattered.layout().capacity(problem_size.mn()),
      tensor_a.layout().stride(),
      tensor_b.layout().stride(),
      tensor_c.layout().stride(),
      tensor_d_scattered.layout().stride(),
      nullptr,                            // <- pointer to index vector to gather A on device
      tensor_indices.device_data(),       // <- pointer to index vector to gather B on device
      tensor_indices.device_data()};      // <- pointer to index vector to scatter D on device

  // Using the arguments, query for extra workspace required for matrix multiplication computation
  size_t workspace_size = Gemm::get_workspace_size(arguments);

  // Allocate workspace memory
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

  // Instantiate CUTLASS kernel depending on templates
  Gemm gemm_op;

  // Check the problem size is supported or not
  cutlass::Status status = gemm_op.can_implement(arguments);
  CUTLASS_CHECK(status);

  // Initialize CUTLASS kernel with arguments and workspace pointer
  status = gemm_op.initialize(arguments, workspace.get());
  CUTLASS_CHECK(status);

  // CPU reference calculation
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d_ref(problem_size.mn());
  cutlass::reference::host::TensorFill(
      tensor_d_ref.host_view());  // <- Fill matrix D on host with zeros

  status = gemm_op();
  cudaDeviceSynchronize();
  CUTLASS_CHECK(status);

  if (options.reference_check) {
    // Recompute only the gathered/scattered columns on the host.
    for (int i = 0; i < problem_size.m(); ++i) {
      for (int j = 0; j < options.index_size; ++j) {
        int b_c_d_col = tensor_indices.at({j, 0});

        for (int k = 0; k < problem_size.k(); ++k) {
          tensor_d_ref.at({i, b_c_d_col}) +=
              alpha * tensor_a.at({i, k}) * tensor_b.at({k, b_c_d_col});
        }

        tensor_d_ref.at({i, b_c_d_col}) += (beta * tensor_c.at({i, b_c_d_col}));
      }
    }

    // Copy output data from CUTLASS and reference kernel to host for comparison
    tensor_d_scattered.sync_host();

    bool passed = cutlass::reference::host::TensorEquals(
      tensor_d_scattered.host_view(),
      tensor_d_ref.host_view());

    if (!passed) {
      std::cout << "Failed!\n";

      std::stringstream fname;
      fname << "error_gather_GEMM_scatter_fusion.txt";
      std::cerr << "Dumping results in " << fname.str() << "\n";

      std::ofstream file(fname.str());

      file
        << "A =\n" << tensor_a.host_view()
        << "\nB =\n" << tensor_b.host_view()
        << "\nindices =\n" << tensor_indices.host_view()
        << "\nC =\n" << tensor_c.host_view()
        << "\n\nReference =\n" << tensor_d_ref.host_view()
        << "\nComputed =\n" << tensor_d_scattered.host_view();

      return -1;
    } else {
      std::cout << "Passed!\n";
    }
  }

  // Result structure
  Result result;

  //
  // Construct events
  //
  cudaEvent_t events[2];

  for (auto & event : events) {
    result.error = cudaEventCreate(&event);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
      return -1;
    }
  }

  // Record an event at the start of a series of GEMMs
  result.error = cudaEventRecord(events[0]);
  if (result.error != cudaSuccess) {
    std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
    return -1;
  }

  //
  // Run profiling loop
  //
  for (int iter = 0; iter < options.iterations; ++iter) {
    // Launch initialized CUTLASS kernel
    status = gemm_op();
    CUTLASS_CHECK(status);
  }

  //
  // Stop profiling loop
  //

  // Record an event when the GEMMs are complete
  result.error = cudaEventRecord(events[1]);
  if (result.error != cudaSuccess) {
    std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
    return -1;
  }

  // Wait for work on the device to complete.
  result.error = cudaEventSynchronize(events[1]);
  if (result.error != cudaSuccess) {
    std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
    return -1;
  }

  // Measure elapsed runtime
  float runtime_ms = 0;
  result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
  if (result.error != cudaSuccess) {
    std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
    return -1;
  }

  // Compute average runtime and GFLOPs.
  result.runtime_ms = double(runtime_ms) / double(options.iterations);
  result.gflops = options.gflops(result.runtime_ms / 1000.0);

  // Cleanup
  for (auto event : events) {
    (void)cudaEventDestroy(event);
  }

  std::cout << "Runtime: " << result.runtime_ms << " ms\n";
  std::cout << " GFLOPs: " << result.gflops << "\n";

  return 0;
}
int main(int argc, const char ** argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << "\n";
return 0;
}
if (!options.valid()) {
std::cerr << "Invalid problem." << "\n";
return -1;
}
return run(options);
}
| cutlass/examples/36_gather_scatter_fusion/gather_scatter_fusion.cu/0 | {
"file_path": "cutlass/examples/36_gather_scatter_fusion/gather_scatter_fusion.cu",
"repo_id": "cutlass",
"token_count": 8501
} | 5 |
################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
# NOTE: this example has been superseded by examples/python; exit immediately
# so the remainder of the (now dead) script below is never executed.
import sys
print("This example is deprecated. Please see examples/python for examples of using "
      "the CUTLASS Python interface.")
sys.exit(0)
import numpy as np
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
import csv
import argparse
# ---------------------------------------------------------------------------
# Command-line interface. The flags mirror the template parameters of a
# CUTLASS grouped GEMM: math instruction, tile shape, tensor layouts and
# alignments, epilogue, scheduling mode, plus runtime arguments (problem-size
# CSV, alpha/beta, activation).
# ---------------------------------------------------------------------------
# parse the arguments
parser = argparse.ArgumentParser(
    description="Launch CUTLASS GEMM Grouped kernels from Python")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
                    default=[1, 1, 1], nargs=3, type=int,
                    help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
                    choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
                    help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
                    choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
                    help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
                    choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
                    help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
                    choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
                    help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
                    type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
                    choices=["Simt", 'TensorOp'], help='This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM')
# tile description
parser.add_argument("-b", "--threadblock_shape",
                    default=[128, 128, 8], nargs=3, type=int,
                    help="This option describes the tile size a thread block with compute")
parser.add_argument("-s", "--stages", default=4,
                    type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[
                    4, 2, 1], nargs=3, type=int,
                    help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
                    type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="RowMajor", type=str, choices=[
                    "RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
                    help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
                    type=int, help="Memory alignment of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="RowMajor", type=str, choices=[
                    "RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
                    help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
                    type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="RowMajor", type=str, choices=[
                    "RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
                    help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
                    type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
                    choices=['float64', 'float32', 'float16', 'bfloat16'], help='Epilogue datatype')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
                    type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
                    help="This option describes the epilogue part of the kernel")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
                    "IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8", "HorizontalSwizzle"],
                    help="This option describes how thread blocks are scheduled on GPU. \
NOTE: Threadblock swizzling is currently not supported by CUTLASS's grouped kernels. \
This parameter is passed in at present to match the APIs of other kernels. The parameter \
is unused within the kernel")
# precompute mode
parser.add_argument("-pm", "--precompute_mode",
                    default="Device", type=str, choices=["Host", "Device"],
                    help="Grouped Gemm Scheduing on device only (Device) or using host precompute (Host)")
# arguments
parser.add_argument("-p", "--problem_size_dir", type=str, default="grouped_gemm_problem_size.csv",
                    help="path to the csv file contains the problem sizes")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float, help="alpha")
parser.add_argument("-beta", "--beta", default=0.0, type=float, help="beta")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
                    choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
                    help="addition arguments for activation")
parser.add_argument('--print_cuda', action="store_true",
                    help="print the underlying CUDA kernel")

# Exit quietly on bad arguments (e.g. when argparse itself raises SystemExit).
try:
    args = parser.parse_args()
except:
    sys.exit(0)
# The kernel is JIT-compiled for exactly the requested SM architecture;
# refuse to run if it does not match the actual device.
cc = device_cc()
if args.compute_capability != cc:
    raise Exception(("Parameter --compute-capability of {} "
                     "does not match that of the device of {}.").format(args.compute_capability, cc))

pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
np.random.seed(0)  # deterministic inputs -> reproducible reference check

# Resolve CLI strings to cutlass binding types / enums.
element_a = getattr(cutlass_bindings, args.element_a)
element_b = getattr(cutlass_bindings, args.element_b)
element_c = getattr(cutlass_bindings, args.element_c)
element_acc = getattr(cutlass_bindings, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass_bindings.OpClass, args.opcode)

math_inst = MathInstruction(
    args.instruction_shape, element_a, element_b,
    element_acc, opclass, math_operation
)

tile_description = TileDescription(
    args.threadblock_shape, args.stages, args.warp_count,
    math_inst
)

layout_a = getattr(cutlass_bindings, args.layout_a)
layout_b = getattr(cutlass_bindings, args.layout_b)
layout_c = getattr(cutlass_bindings, args.layout_c)

A = TensorDescription(
    element_a, layout_a, args.alignment_a
)
B = TensorDescription(
    element_b, layout_b, args.alignment_b
)
C = TensorDescription(
    element_c, layout_c, args.alignment_c
)

element_epilogue = getattr(cutlass_bindings, args.element_epilogue)
# "identity" uses the plain epilogue functor chosen via --epilogue_functor;
# any other activation wraps the linear combination in
# LinearCombinationGeneric with the selected activation functor.
if args.activation_function == "identity":
    epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
        C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
    epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
        getattr(pycutlass, args.activation_function)(element_epilogue),
        C.element, C.alignment, math_inst.element_accumulator, element_epilogue)

swizzling_functor = getattr(cutlass_bindings, args.swizzling_functor)
precompute_mode = getattr(SchedulerMode, args.precompute_mode)

operation = GemmOperationGrouped(
    arch=args.compute_capability, tile_description=tile_description,
    A=A, B=B, C=C,
    epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor,
    precompute_mode=precompute_mode
)

if args.print_cuda:
    print(operation.rt_module.emit())

# JIT-compile the operation and build the host-side (NumPy) reference.
pycutlass.compiler.add_module([operation, ])
reference_module = ReferenceModule(A, B, C)
# get problems
# Each CSV row is "m,n,k" describing one GEMM of the group.
problem_sizes = []
with open(args.problem_size_dir) as csv_file:
    reader = csv.reader(csv_file)
    for row in reader:
        problem_sizes.append(
            cutlass_bindings.gemm.GemmCoord(int(row[0]), int(row[1]), int(row[2]))
        )

problem_count = len(problem_sizes)

tensor_As = []
tensor_Bs = []
tensor_Cs = []
tensor_Ds = []
problem_sizes_coord = []
tensor_D_refs = []

for problem_size in problem_sizes:
    # Floating-point inputs are rounded up to whole values (np.ceil of a
    # uniform draw) so accumulation differences stay small for the reference
    # comparison; int8 inputs are drawn directly from a small uniform range.
    if args.element_a != "int8":
        if args.element_a == "bfloat16":
            tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.m()
                               * problem_size.k(),))).astype(bfloat16)
        else:
            tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.m()
                               * problem_size.k(),))).astype(getattr(np, args.element_a))
    else:
        tensor_A = np.random.uniform(low=-2, high=2, size=(problem_size.m()
                                     * problem_size.k(),)).astype(getattr(np, args.element_a))
    if args.element_b != "int8":
        if args.element_b == "bfloat16":
            tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.k()
                               * problem_size.n(),))).astype(bfloat16)
        else:
            tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.k()
                               * problem_size.n(),))).astype(getattr(np, args.element_b))
    else:
        tensor_B = np.random.uniform(low=-2, high=2, size=(problem_size.k()
                                     * problem_size.n(),)).astype(getattr(np, args.element_b))
    if args.element_c != "int8":
        # With --bias, C is a vector broadcast along rows (RowMajor) or
        # columns (ColumnMajor) rather than a full m-by-n matrix.
        if args.bias:
            if args.layout_c == "RowMajor":
                c_size = problem_size.n()
            elif args.layout_c == "ColumnMajor":
                c_size = problem_size.m()
            else:
                raise ValueError(args.layout_c)
        else:
            c_size = problem_size.m() * problem_size.n()
        if args.element_c == "bfloat16":
            tensor_C = np.ceil(
                np.random.uniform(low=-8.5, high=7.5, size=(c_size,))
            ).astype(bfloat16)
        else:
            tensor_C = np.ceil(
                np.random.uniform(low=-8.5, high=7.5, size=(c_size,))
            ).astype(getattr(np, args.element_c))
    else:
        tensor_C = np.random.uniform(
            low=-2, high=2, size=(problem_size.m() * problem_size.n(),)
        ).astype(getattr(np, args.element_c))
    # Output buffer D, zero-initialized; filled by the device kernel.
    tensor_D = np.zeros(
        shape=(problem_size.m() * problem_size.n(),)
    ).astype(getattr(np, args.element_c))

    tensor_As.append(tensor_A)
    tensor_Bs.append(tensor_B)
    tensor_Cs.append(tensor_C)
    tensor_Ds.append(tensor_D)

    # Host reference: GEMM epilogue followed by the selected activation.
    tensor_D_ref = reference_module.run(
        tensor_A, tensor_B, tensor_C, problem_size,
        args.alpha, args.beta, args.bias)
    tensor_D_ref = getattr(pycutlass, args.activation_function).numpy(*([tensor_D_ref,] + args.activation_args))
    tensor_D_refs.append(tensor_D_ref)
    problem_sizes_coord.append(problem_size)

# Bundle all problems into a single grouped-GEMM launch.
arguments = GemmGroupedArguments(
    operation, problem_sizes_coord, tensor_As, tensor_Bs, tensor_Cs, tensor_Ds,
    output_op=operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args))
)

operation.run(arguments)
arguments.sync()  # wait for the device and copy the D tensors back to host
# Verify each GEMM result against the NumPy reference. Exact equality is
# checked first (expected for integer dtypes and ceil-rounded inputs); if it
# fails, fall back to an elementwise relative-tolerance check suitable for
# floating-point accumulation-order differences.
#
# Previously this used `try: assert ... except:` with a bare except, which
# swallowed *every* exception (including SystemExit / KeyboardInterrupt) and
# abused exceptions for control flow; an explicit comparison is both safer
# and clearer.
for tensor_d, tensor_d_ref in zip(tensor_Ds, tensor_D_refs):
    if not np.array_equal(tensor_d, tensor_d_ref):
        assert np.allclose(tensor_d, tensor_d_ref, rtol=1e-5), \
            "CUTLASS grouped GEMM output does not match reference"
print("Passed.")
| cutlass/examples/40_cutlass_py/customizable/gemm_grouped.py/0 | {
"file_path": "cutlass/examples/40_cutlass_py/customizable/gemm_grouped.py",
"repo_id": "cutlass",
"token_count": 5926
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#ifdef HAS_PYTORCH
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#endif
#include <curand_kernel.h>
#include <cmath>
#include <vector>
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/vector.h"
#include "cutlass/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/platform/platform.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "debug_utils.h"
#include "epilogue/epilogue_pipelined.h"
#include "epilogue/epilogue_rescale_output.h"
#include "gemm/custom_mma.h"
#include "gemm/find_default_mma.h"
#include "gemm/mma_from_smem.h"
#include "gemm_kernel_utils.h"
#include "transform/tile_smem_loader.h"
#include <inttypes.h>
using namespace gemm_kernel_utils;
namespace {
// Number of resident warps per SM targeted by the forward kernel's launch
// bounds. On SM80+ with non-float (i.e. 16-bit) input types the kernel
// targets 16 warps/SM; otherwise 12.
template <typename scalar_t, typename Arch>
constexpr int getWarpsPerSmFw() {
  return (
      Arch::kMinComputeCapability >= 80 &&
              !cutlass::platform::is_same<scalar_t, float>::value
          ? 16
          : 12);
}

// Atomic max for float implemented on top of integer atomics.
// source: https://stackoverflow.com/a/51549250
// For value >= 0 the IEEE-754 bit pattern compares like a signed int, so
// atomicMax on int works; for negative values the ordering flips, so
// atomicMin on unsigned is used instead. Returns the previous value's bits
// reinterpreted as float.
static CUTLASS_DEVICE float atomicMaxFloat(float* addr, float value) {
  return (value >= 0)
      ? __int_as_float(atomicMax((int*)addr, __float_as_int(value)))
      : __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
}
} // namespace
// If ToBatchHookType_ is supplied other than this default (which is
// never the case in the xformers library) then the user is
// defining the logic which each block uses to find its data to work on,
// with the advance_to_batch function with the following signature.
// It should return false if there is no work to do for this block.
// In general this will not work with saving for backward due to fixed layout
// for logsumexp and incompatible rngs for dropout, so is likely only useful for
// custom inference.
// Default (no-op) batch hook: leaves q_start/k_start untouched and reports
// that this block always has work to do, so the kernel falls through to its
// standard batch-advance logic.
struct DefaultToBatchHook {
  template <typename Params>
  CUTLASS_DEVICE static bool advance_to_batch(
      Params&,
      int64_t& /* q_start */,
      int64_t& /* k_start */) {
    // Never skips work; the standard path in advance_to_block() handles
    // pointer advancement.
    return true;
  }
};
template <
// The datatype of Q/K/V
typename scalar_t_,
// Architecture we are targeting (eg `cutlass::arch::Sm80`)
typename ArchTag,
// If Q/K/V are correctly aligned in memory and we can run a fast kernel
bool isAligned_,
int kQueriesPerBlock_,
int kKeysPerBlock_,
// upperbound on `max(value.shape[-1], query.shape[-1])`
int kMaxK_ = (int)cutlass::platform::numeric_limits<uint32_t>::max(),
// This is quite slower on V100 for some reason
// Set to false if you know at compile-time you will never need dropout
bool kSupportsDropout_ = true,
bool kSupportsBias_ = true,
typename ToBatchHookType_ = DefaultToBatchHook>
struct AttentionKernel {
enum CustomMaskType {
NoCustomMask = 0,
CausalFromTopLeft = 1,
CausalFromBottomRight = 2,
NumCustomMaskTypes,
};
using scalar_t = scalar_t_;
using accum_t = float;
using lse_scalar_t = float;
using output_t = scalar_t;
// Accumulator between 2 iterations
// Using `accum_t` improves perf on f16 at the cost of
// numerical errors
using output_accum_t = accum_t;
static constexpr bool kSupportsDropout = kSupportsDropout_;
static constexpr bool kSupportsBias = kSupportsBias_;
static constexpr int kKeysPerBlock = kKeysPerBlock_;
static constexpr int kQueriesPerBlock = kQueriesPerBlock_;
static constexpr int kMaxK = kMaxK_;
static constexpr bool kIsAligned = isAligned_;
static constexpr bool kSingleValueIteration = kMaxK <= kKeysPerBlock;
static constexpr int32_t kAlignLSE = 32; // block size of backward
static constexpr bool kIsHalf = cutlass::sizeof_bits<scalar_t>::value == 16;
static constexpr bool kPreloadV =
ArchTag::kMinComputeCapability >= 80 && kIsHalf;
static constexpr bool kKeepOutputInRF = kSingleValueIteration;
static constexpr bool kNeedsOutputAccumulatorBuffer = !kKeepOutputInRF &&
!cutlass::platform::is_same<output_accum_t, output_t>::value;
static_assert(kQueriesPerBlock % 32 == 0, "");
static_assert(kKeysPerBlock % 32 == 0, "");
static constexpr int kNumWarpsPerBlock =
kQueriesPerBlock * kKeysPerBlock / (32 * 32);
static constexpr int kWarpSize = 32;
// Launch bounds
static constexpr int kNumThreads = kWarpSize * kNumWarpsPerBlock;
static constexpr int kMinBlocksPerSm =
getWarpsPerSmFw<scalar_t, ArchTag>() / kNumWarpsPerBlock;
  // Kernel parameters: tensor pointers, problem dimensions and strides, plus
  // advance_to_block(), which moves the pointers to the slice of data this
  // thread block works on. Fields below `q_strideH` are only read inside
  // advance_to_block() and need not live in registers afterwards.
  struct Params {
    // Input tensors
    scalar_t* query_ptr = nullptr; // [num_queries, num_heads, head_dim]
    scalar_t* key_ptr = nullptr; // [num_keys, num_heads, head_dim]
    scalar_t* value_ptr = nullptr; // [num_keys, num_heads, head_dim_value]
    scalar_t* attn_bias_ptr = nullptr; // [num_heads, num_queries, num_keys]
    // Variable-sequence-length support: per-batch start offsets for Q and K,
    // and optional explicit K lengths. All may be null for fixed shapes.
    int32_t* seqstart_q_ptr = nullptr;
    int32_t* seqstart_k_ptr = nullptr;

    int32_t* seqlen_k_ptr = nullptr;
    uint32_t causal_diagonal_offset = 0;

    // Output tensors
    output_t* output_ptr = nullptr; // [num_queries, num_heads, head_dim_value]
    // [num_queries, num_heads, head_dim_value]
    output_accum_t* output_accum_ptr = nullptr;
    // [num_heads, num_queries] - can be null
    lse_scalar_t* logsumexp_ptr = nullptr;

    // Scale
    accum_t scale = 0.0;

    // Dimensions/strides
    int32_t head_dim = 0;
    int32_t head_dim_value = 0;
    int32_t num_queries = 0;
    int32_t num_keys = 0;
    int32_t num_keys_absolute = 0;

    uint8_t custom_mask_type = NoCustomMask;

    int32_t q_strideM = 0;
    int32_t k_strideM = 0;
    int32_t v_strideM = 0;
    int32_t bias_strideM = 0;

    int32_t o_strideM = 0;

    // Everything below is only used in `advance_to_block`
    // and shouldn't use registers
    int32_t q_strideH = 0;
    int32_t k_strideH = 0;
    int32_t v_strideH = 0;
    int64_t bias_strideH = 0;

    int64_t q_strideB = 0;
    int64_t k_strideB = 0;
    int64_t v_strideB = 0;
    int64_t bias_strideB = 0;

    int32_t num_batches = 0;
    int32_t num_heads = 0;

    // dropout
    bool use_dropout = false;
    unsigned long long dropout_batch_head_rng_offset = 0;
    float dropout_prob = 0.0f;
#ifdef HAS_PYTORCH
    at::PhiloxCudaState rng_engine_inputs = at::PhiloxCudaState(0, 0);
#endif

    // Moves pointers to what we should process
    // Returns "false" if there is no work to do
    CUTLASS_DEVICE bool advance_to_block() {
      // Grid mapping: z = batch, y = head, x = tile of kQueriesPerBlock rows.
      auto batch_id = blockIdx.z;
      auto head_id = blockIdx.y;
      auto query_start = blockIdx.x * kQueriesPerBlock;

      // logsumexp rows are padded up to a multiple of kAlignLSE (the backward
      // pass's block size).
      auto lse_dim = ceil_div((int32_t)num_queries, kAlignLSE) * kAlignLSE;

      if (kSupportsDropout) {
        dropout_batch_head_rng_offset =
            batch_id * num_heads * num_queries * num_keys +
            head_id * num_queries * num_keys;
      }

      int64_t q_start = 0, k_start = 0;
      // Advance to current batch - in case of different sequence lengths
      constexpr bool kToBatchHook =
          !cutlass::platform::is_same<ToBatchHookType_, DefaultToBatchHook>::
              value;
      if (kToBatchHook) {
        // Call out to a custom implementation.
        if (!ToBatchHookType_::advance_to_batch(*this, q_start, k_start)) {
          return false;
        }
      } else if (seqstart_q_ptr != nullptr) {
        // Variable sequence lengths: derive this batch's [q_start, q_next)
        // and [k_start, k_end) windows from the seqstart arrays.
        assert(seqstart_k_ptr != nullptr);
        seqstart_q_ptr += batch_id;

        q_start = seqstart_q_ptr[0];
        int64_t q_next_start = seqstart_q_ptr[1];
        int64_t k_end;
        seqstart_k_ptr += batch_id;

        if (seqlen_k_ptr) {
          k_start = seqstart_k_ptr[0];
          k_end = k_start + seqlen_k_ptr[batch_id];
        } else {
          k_start = seqstart_k_ptr[0];
          k_end = seqstart_k_ptr[1];
        }

        num_queries = q_next_start - q_start;
        num_keys = k_end - k_start;

        // This tile lies entirely past the end of this batch's queries.
        if (query_start >= num_queries) {
          return false;
        }
      } else {
        // Fixed shapes: advance by per-batch strides.
        query_ptr += batch_id * q_strideB;
        key_ptr += batch_id * k_strideB;
        value_ptr += batch_id * v_strideB;
        output_ptr += int64_t(batch_id * num_queries) * o_strideM;
        if (output_accum_ptr != nullptr) {
          output_accum_ptr +=
              int64_t(batch_id * num_queries) * (head_dim_value * num_heads);
        }
        q_start = 0;
        k_start = 0;
      }

      // Advance to the current batch / head / query_start
      query_ptr += (q_start + query_start) * q_strideM + head_id * q_strideH;
      key_ptr += k_start * k_strideM + head_id * k_strideH;
      value_ptr += k_start * v_strideM + head_id * v_strideH;
      output_ptr +=
          int64_t(q_start + query_start) * o_strideM + head_id * head_dim_value;

      if (kSupportsBias && attn_bias_ptr != nullptr) {
        attn_bias_ptr += (batch_id * bias_strideB) + (head_id * bias_strideH);
      }
      if (output_accum_ptr != nullptr) {
        output_accum_ptr +=
            int64_t(q_start + query_start) * (head_dim_value * num_heads) +
            head_id * head_dim_value;
      } else {
        // Accumulate directly in the destination buffer (eg for f32)
        output_accum_ptr = (accum_t*)output_ptr;
      }

      if (logsumexp_ptr != nullptr) {
        // lse[batch_id, head_id, query_start]
        logsumexp_ptr +=
            batch_id * lse_dim * num_heads + head_id * lse_dim + query_start;
      }

      // Custom masking
      if (custom_mask_type == CausalFromBottomRight) {
        causal_diagonal_offset = num_keys - num_queries;
      }
      // We use num_keys_absolute to index into the rng_state
      // We need this index to match between forward and backwards
      num_keys_absolute = num_keys;
      if (custom_mask_type == CausalFromTopLeft ||
          custom_mask_type == CausalFromBottomRight) {
        // the bottom row of the current block is query_start + kQueriesPerBlock
        // the last active key is then query_start + causal_diagonal_offset +
        // kQueriesPerBlock so num_keys is the min between actual num_keys and
        // this to avoid extra computations
        num_keys = cutlass::fast_min(
            int32_t(query_start + causal_diagonal_offset + kQueriesPerBlock),
            num_keys);
      }
      num_queries -= query_start;
      num_batches = 0; // no longer used after

      // If num_queries == 1, and there is only one key head we're wasting
      // 15/16th of tensor core compute In that case :
      // - we only launch kernels for head_id % kQueriesPerBlock == 0
      // - we iterate over heads instead of queries (strideM = strideH)
      if (num_queries == 1 && k_strideH == 0 && v_strideH == 0) {
        if (head_id % kQueriesPerBlock != 0)
          return false;
        q_strideM = q_strideH;
        num_queries = num_heads;
        num_heads = 1; // unused but here for intent
        // remove causal since n_query = 1
        // otherwise, offset would change with head !
        custom_mask_type = NoCustomMask;
        o_strideM = head_dim_value;
      }

      // Make sure the compiler knows these variables are the same on all
      // the threads of the warp.
      // Only worth doing if they could have been modified above.
      query_ptr = warp_uniform(query_ptr);
      key_ptr = warp_uniform(key_ptr);
      value_ptr = warp_uniform(value_ptr);
      if (kSupportsBias) {
        attn_bias_ptr = warp_uniform(attn_bias_ptr);
      }
      output_ptr = warp_uniform(output_ptr);
      output_accum_ptr = warp_uniform(output_accum_ptr);
      logsumexp_ptr = warp_uniform(logsumexp_ptr);
      num_queries = warp_uniform(num_queries);
      num_keys = warp_uniform(num_keys);
      num_heads = warp_uniform(num_heads);
      o_strideM = warp_uniform(o_strideM);
      custom_mask_type = warp_uniform(custom_mask_type);
      return true;
    }

    // Grid: one block per kQueriesPerBlock query rows, per head, per batch.
    __host__ dim3 getBlocksGrid() const {
      return dim3(
          ceil_div(num_queries, (int32_t)kQueriesPerBlock),
          num_heads,
          num_batches);
    }

    // Block: one warp lane dimension x kNumWarpsPerBlock warps.
    __host__ dim3 getThreadsGrid() const {
      return dim3(kWarpSize, kNumWarpsPerBlock, 1);
    }
  };
  // Type bundle for the first matmul (Q @ K.T); see the comment inside.
  struct MM0 {
    /*
      In this first matmul, we compute a block of `Q @ K.T`.
      While the calculation result is still hot in registers, we update
      `mi`, `m_prime`, `s_prime` in shared-memory, and then store this value
      into a shared-memory ("AccumulatorSharedStorage") that is used later as
      operand A for the second matmul (see MM1)
    */
    using GemmType = DefaultGemmType<ArchTag, scalar_t>;

    using OpClass = typename GemmType::OpClass;
    using DefaultConfig =
        typename cutlass::gemm::device::DefaultGemmConfiguration<
            OpClass,
            ArchTag,
            scalar_t,
            scalar_t,
            scalar_t, // ElementC
            accum_t // ElementAccumulator
            >;
    // Fall back to the architecture's minimum alignment when the caller did
    // not guarantee aligned Q/K/V pointers.
    static constexpr int kAlignmentA =
        kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment;
    static constexpr int kAlignmentB =
        kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment;
    using ThreadblockShape = cutlass::gemm::
        GemmShape<kQueriesPerBlock, kKeysPerBlock, GemmType::ThreadK>;
    using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
    // A = Q (row-major), B = K (column-major, i.e. K.T), C row-major in accum_t.
    using DefaultMma = typename cutlass::gemm::threadblock::FindDefaultMma<
        scalar_t, // ElementA,
        cutlass::layout::RowMajor, // LayoutA,
        kAlignmentA,
        scalar_t, // ElementB,
        cutlass::layout::ColumnMajor, // LayoutB,
        kAlignmentB,
        accum_t,
        cutlass::layout::RowMajor, // LayoutC,
        OpClass,
        ArchTag, // ArchTag
        ThreadblockShape, // ThreadblockShape
        WarpShape, // WarpShape
        typename GemmType::InstructionShape, // InstructionShape
        // 4 stages on SM80+ with 16-bit types; otherwise the default.
        ArchTag::kMinComputeCapability >= 80 && kIsHalf
            ? 4
            : DefaultConfig::kStages,
        typename GemmType::Operator // Operator
        >::DefaultMma;
    using MmaCore = typename DefaultMma::MmaCore;
    using IteratorA = typename DefaultMma::IteratorA;
    using IteratorB = typename DefaultMma::IteratorB;
    using DefaultThreadblockMma = typename DefaultMma::ThreadblockMma;
    // When a single iteration covers all of K (kSingleValueIteration), use a
    // custom Mma specialized for that bounded problem size.
    using Mma = typename cutlass::platform::conditional<
        kSingleValueIteration,
        typename MakeCustomMma<DefaultThreadblockMma, kMaxK>::Mma,
        DefaultThreadblockMma>::type;
    using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator<
        typename Mma::Operator::IteratorC,
        accum_t,
        kWarpSize>::Iterator;
    static_assert(
        MmaCore::WarpCount::kM * MmaCore::WarpCount::kN *
                MmaCore::WarpCount::kK ==
            kNumWarpsPerBlock,
        "");

    // used for efficient load of bias tile Bij from global to shared memory
    using BiasLoader = TileSmemLoader<
        scalar_t,
        cutlass::MatrixShape<kQueriesPerBlock, kKeysPerBlock>,
        MmaCore::kThreads,
        // input restriction: kv_len has to be a multiple of this value
        128 / cutlass::sizeof_bits<scalar_t>::value>;

    // Epilogue to store to shared-memory in a format that we can use later for
    // the second matmul
    using B2bGemm = typename cutlass::gemm::threadblock::B2bGemm<
        typename Mma::Operator::IteratorC,
        typename Mma::Operator,
        scalar_t,
        WarpShape,
        ThreadblockShape>;
    using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
  };
  // Type bundle for the second matmul (attn @ V); see the comment inside.
  struct MM1 {
    /**
      Second matmul: perform `attn @ V` where `attn` is the attention (not
      normalized) and stored in shared memory
    */
    using GemmType = DefaultGemmType<ArchTag, scalar_t>;

    using OpClass = typename GemmType::OpClass;
    using DefaultConfig =
        typename cutlass::gemm::device::DefaultGemmConfiguration<
            OpClass,
            ArchTag,
            scalar_t,
            scalar_t,
            output_accum_t, // ElementC
            accum_t // ElementAccumulator
            >;
    // Operand A comes from shared memory (MM0's accumulator), so it is always
    // well aligned; only B (= V, loaded from global) depends on kIsAligned.
    static constexpr int kAlignmentA = DefaultConfig::kAlignmentA; // from smem
    static constexpr int kAlignmentB =
        kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment;
    using ThreadblockShape = cutlass::gemm::
        GemmShape<kQueriesPerBlock, kKeysPerBlock, GemmType::ThreadK>;
    using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
    using InstructionShape = typename GemmType::InstructionShape;

    using LayoutB = cutlass::layout::RowMajor;
    using DefaultGemm = cutlass::gemm::kernel::DefaultGemm<
        scalar_t, // ElementA,
        cutlass::layout::RowMajor, // LayoutA,
        kAlignmentA,
        scalar_t, // ElementB,
        LayoutB, // LayoutB,
        kAlignmentB,
        output_accum_t,
        cutlass::layout::RowMajor, // LayoutC,
        accum_t,
        OpClass,
        ArchTag,
        ThreadblockShape,
        WarpShape,
        typename GemmType::InstructionShape,
        typename DefaultConfig::EpilogueOutputOp,
        void, // ThreadblockSwizzle - not used
        // 4 stages on SM80+ with 16-bit types; otherwise the default.
        ArchTag::kMinComputeCapability >= 80 && kIsHalf
            ? 4
            : DefaultConfig::kStages,
        false, // SplitKSerial
        typename GemmType::Operator>;

    // Warp-level iterator that reads operand A out of the shared-memory
    // accumulator produced by MM0 instead of from global memory.
    using WarpIteratorA = typename cutlass::gemm::threadblock::
        DefaultWarpIteratorAFromSharedMemory<
            typename DefaultGemm::Mma::Policy::Operator::Shape, // WarpShape
            typename DefaultGemm::Mma::Policy::Operator::InstructionShape,
            typename DefaultGemm::Mma::Policy::Operator::IteratorA,
            typename DefaultGemm::Mma::Policy>::WarpIterator;
    using DefaultMmaFromSmem =
        typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
            typename DefaultGemm::Mma,
            MM0::AccumulatorSharedStorage::Shape::kN, // kMaxK
            WarpIteratorA,
            false>; // kScaleOperandA
    using Mma = typename DefaultMmaFromSmem::Mma;
    using IteratorB = typename Mma::IteratorB;
    using WarpCount = typename Mma::WarpCount;
    static_assert(
        WarpCount::kM * WarpCount::kN * WarpCount::kK == kNumWarpsPerBlock,
        "");

    using DefaultEpilogue = typename DefaultGemm::Epilogue;
    // Output iterators in the final dtype (output_t) and the accumulation
    // dtype (output_accum_t) for the multi-iteration path.
    using OutputTileIterator =
        typename cutlass::epilogue::threadblock::PredicatedTileIterator<
            typename DefaultEpilogue::OutputTileIterator::ThreadMap,
            output_t>;
    using OutputTileIteratorAccum =
        typename cutlass::epilogue::threadblock::PredicatedTileIterator<
            typename DefaultEpilogue::OutputTileIterator::ThreadMap,
            output_accum_t>;
  };
  // Alignment requirements for the Q/K/V operands, validated against pointers
  // and strides in check_supported() below.
  static constexpr int64_t kAlignmentQ = MM0::kAlignmentA;
  static constexpr int64_t kAlignmentK = MM0::kAlignmentB;
  static constexpr int64_t kAlignmentV = 1;
// Shared storage - depends on kernel params
struct ScalingCoefs {
cutlass::Array<accum_t, kQueriesPerBlock> m_prime;
cutlass::Array<accum_t, kQueriesPerBlock> s_prime;
cutlass::Array<accum_t, kQueriesPerBlock> mi;
cutlass::Array<accum_t, kQueriesPerBlock> out_rescale;
cutlass::Array<accum_t, kQueriesPerBlock * MM0::MmaCore::WarpCount::kN>
addition_storage;
};
  // Shared-memory layout when the epilogue runs once at the end: the epilogue
  // buffer can alias (union) with the matmul buffers because they are no
  // longer needed by the time the epilogue runs.
  struct SharedStorageEpilogueAtEnd : ScalingCoefs {
    struct SharedStorageAfterMM0 {
      // Everything here might be overwritten during MM0
      union {
        typename MM0::BiasLoader::SmemTile bias;
        typename MM0::AccumulatorSharedStorage si;
      };
      typename MM1::Mma::SharedStorage mm1;
    };
    union {
      typename MM0::Mma::SharedStorage mm0;
      SharedStorageAfterMM0 after_mm0;
      typename MM1::DefaultEpilogue::SharedStorage epilogue;
    };
    CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage&
    epilogue_shared_storage() {
      return epilogue;
    }
  };
  // Shared-memory layout when the epilogue runs inside the key-block loop:
  // the epilogue buffer must coexist with mm1's buffers, so it lives next to
  // them instead of in the outer union.
  struct SharedStorageEpilogueInLoop : ScalingCoefs {
    struct SharedStorageAfterMM0 {
      // Everything here might be overwritten during MM0
      union {
        typename MM0::BiasLoader::SmemTile bias;
        typename MM0::AccumulatorSharedStorage si;
      };
      typename MM1::Mma::SharedStorage mm1;
      typename MM1::DefaultEpilogue::SharedStorage epilogue;
    };
    union {
      typename MM0::Mma::SharedStorage mm0;
      SharedStorageAfterMM0 after_mm0;
    };
    CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage&
    epilogue_shared_storage() {
      return after_mm0.epilogue;
    }
  };
  // Pick the cheaper layout (epilogue aliasing the matmul smem) when the
  // epilogue only runs once; otherwise use the in-loop layout.
  using SharedStorage = typename cutlass::platform::conditional<
      kSingleValueIteration || kKeepOutputInRF,
      SharedStorageEpilogueAtEnd,
      SharedStorageEpilogueInLoop>::type;
  // Host-side validation of the problem: pointer and stride alignment for
  // Q/K/V (and the optional attention bias), plus the custom mask type.
  // Returns true when all checks pass; the CHECK_ALIGNED_PTR /
  // XFORMERS_CHECK macros (defined elsewhere) are assumed to report the
  // message and bail out on failure.
  static bool __host__ check_supported(Params const& p) {
    CHECK_ALIGNED_PTR(p.query_ptr, kAlignmentQ);
    CHECK_ALIGNED_PTR(p.key_ptr, kAlignmentK);
    CHECK_ALIGNED_PTR(p.value_ptr, kAlignmentV);
    if (kSupportsBias) {
      CHECK_ALIGNED_PTR(p.attn_bias_ptr, kAlignmentQ);
      // Batch/head strides only matter when there is more than one batch/head.
      XFORMERS_CHECK(
          p.num_batches <= 1 || p.bias_strideB % kAlignmentQ == 0,
          "attn_bias is not correctly aligned (strideB)");
      XFORMERS_CHECK(
          p.num_heads <= 1 || p.bias_strideH % kAlignmentQ == 0,
          "attn_bias is not correctly aligned (strideH)");
      XFORMERS_CHECK(
          p.bias_strideM % kAlignmentQ == 0,
          "attn_bias is not correctly aligned");
    }
    XFORMERS_CHECK(
        p.q_strideM % kAlignmentQ == 0,
        "query is not correctly aligned (strideM)");
    XFORMERS_CHECK(
        p.k_strideM % kAlignmentK == 0,
        "key is not correctly aligned (strideM)");
    XFORMERS_CHECK(
        p.v_strideM % kAlignmentV == 0,
        "value is not correctly aligned (strideM)");
    XFORMERS_CHECK(
        p.num_heads <= 1 || p.q_strideH % kAlignmentQ == 0,
        "query is not correctly aligned (strideH)");
    XFORMERS_CHECK(
        p.num_heads <= 1 || p.k_strideH % kAlignmentK == 0,
        "key is not correctly aligned (strideH)");
    XFORMERS_CHECK(
        p.num_heads <= 1 || p.v_strideH % kAlignmentV == 0,
        "value is not correctly aligned (strideH)");
    XFORMERS_CHECK(
        p.custom_mask_type < NumCustomMaskTypes,
        "invalid value for `custom_mask_type`");
    return true;
  }
  // Runs the full fused attention for the query block assigned to this CTA:
  // streams over blocks of keys, computing S = Q.K^T (with optional bias and
  // causal masking), performing an online softmax update, optional dropout,
  // then accumulating `attn @ V`, and finally writing the rescaled output
  // and the logsumexp values.
  static void CUTLASS_DEVICE attention_kernel(Params& p) {
    // In this block, we will only ever:
    // - read query[query_start:query_end, :]
    // - write to output[query_start:query_end, :]
    extern __shared__ char smem_buffer[];
    SharedStorage& shared_storage = *((SharedStorage*)smem_buffer);
    auto& m_prime = shared_storage.m_prime;
    auto& s_prime = shared_storage.s_prime;
    auto& mi = shared_storage.mi;
    auto& out_rescale = shared_storage.out_rescale;
    const uint32_t query_start = blockIdx.x * kQueriesPerBlock;
    static_assert(kQueriesPerBlock < kNumWarpsPerBlock * kWarpSize, "");
    // One thread per query row initializes the running softmax statistics.
    if (thread_id() < kQueriesPerBlock) {
      s_prime[thread_id()] = accum_t(0);
      out_rescale[thread_id()] = accum_t(1.0);
      m_prime[thread_id()] =
          -cutlass::platform::numeric_limits<accum_t>::infinity();
      mi[thread_id()] = -cutlass::platform::numeric_limits<accum_t>::infinity();
    }
    // Output accumulator for `attn @ V`; kept across key-block iterations
    // when kKeepOutputInRF is set.
    typename MM1::Mma::FragmentC accum_o;
    accum_o.clear();
    auto createOutputIter = [&](int col) -> typename MM1::OutputTileIterator {
      using OutputTileIterator = typename MM1::OutputTileIterator;
      return OutputTileIterator(
          typename OutputTileIterator::Params{(int32_t)p.o_strideM},
          p.output_ptr,
          typename OutputTileIterator::TensorCoord{
              p.num_queries, p.head_dim_value},
          thread_id(),
          {0, col});
    };
    auto createOutputAccumIter = [&](int col) ->
        typename MM1::OutputTileIteratorAccum {
      using OutputTileIteratorAccum = typename MM1::OutputTileIteratorAccum;
      return OutputTileIteratorAccum(
          typename OutputTileIteratorAccum::Params{
              (int32_t)(p.head_dim_value * p.num_heads)},
          p.output_accum_ptr,
          typename OutputTileIteratorAccum::TensorCoord{
              p.num_queries, p.head_dim_value},
          thread_id(),
          {0, col});
    };
#ifdef HAS_PYTORCH
    curandStatePhilox4_32_10_t curand_state_init;
    if (kSupportsDropout && p.use_dropout) {
      const auto seeds = at::cuda::philox::unpack(p.rng_engine_inputs);
      // each element of the attention matrix P with shape
      // (batch_sz, n_heads, n_queries, n_keys) is associated with a single
      // offset in RNG sequence. we initialize the RNG state with offset that
      // starts at the beginning of a (n_queries, n_keys) matrix for this
      // block's batch_id and head_id
      // initializing rng state is very expensive, so we run once per kernel,
      // rather than once per iteration. each iteration takes a copy of the
      // initialized RNG state and offsets it as needed.
      curand_init(
          std::get<0>(seeds),
          0,
          std::get<1>(seeds) + p.dropout_batch_head_rng_offset,
          &curand_state_init);
    }
#endif
    // Iterate through keys
    for (int32_t iter_key_start = 0; iter_key_start < p.num_keys;
         iter_key_start += kKeysPerBlock) {
      // Clamp the logical problem sizes to the data actually available in
      // this (possibly partial) tile.
      int32_t problem_size_0_m =
          cutlass::fast_min((int32_t)kQueriesPerBlock, p.num_queries);
      int32_t problem_size_0_n = cutlass::fast_min(
          int32_t(kKeysPerBlock), p.num_keys - iter_key_start);
      int32_t const& problem_size_0_k = p.head_dim;
      int32_t const& problem_size_1_n = p.head_dim_value;
      int32_t const& problem_size_1_k = problem_size_0_n;
      // Prefetches the V tile for column-block `blockN` into shared memory.
      auto prologueV = [&](int blockN) {
        typename MM1::Mma::IteratorB iterator_V(
            typename MM1::IteratorB::Params{typename MM1::LayoutB(p.v_strideM)},
            p.value_ptr + iter_key_start * p.v_strideM,
            {problem_size_1_k, problem_size_1_n},
            thread_id(),
            cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN});
        MM1::Mma::prologue(
            shared_storage.after_mm0.mm1,
            iterator_V,
            thread_id(),
            problem_size_1_k);
      };
      __syncthreads(); // Need to have shared memory initialized, and `m_prime`
                       // updated from end of prev iter
      //
      // MATMUL: Q.K_t
      //
      // Computes the block-matrix product of:
      // (a) query[query_start:query_end, :]
      // with
      // (b) key[iter_key_start:iter_key_start + kKeysPerBlock]
      // and stores that into `shared_storage.si`
      //
      // Compute threadblock location
      cutlass::gemm::GemmCoord tb_tile_offset = {0, 0, 0};
      cutlass::MatrixCoord tb_offset_A{
          tb_tile_offset.m() * MM0::Mma::Shape::kM, tb_tile_offset.k()};
      cutlass::MatrixCoord tb_offset_B{
          tb_tile_offset.k(), tb_tile_offset.n() * MM0::Mma::Shape::kN};
      // Construct iterators to A and B operands
      typename MM0::IteratorA iterator_A(
          typename MM0::IteratorA::Params(
              typename MM0::MmaCore::LayoutA(p.q_strideM)),
          p.query_ptr,
          {problem_size_0_m, problem_size_0_k},
          thread_id(),
          tb_offset_A);
      typename MM0::IteratorB iterator_B(
          typename MM0::IteratorB::Params(
              typename MM0::MmaCore::LayoutB(p.k_strideM)),
          p.key_ptr + iter_key_start * p.k_strideM,
          {problem_size_0_k, problem_size_0_n},
          thread_id(),
          tb_offset_B);
      auto my_warp_id = warp_uniform(warp_id());
      auto my_lane_id = lane_id();
      // Construct thread-scoped matrix multiply
      typename MM0::Mma mma(
          shared_storage.mm0, thread_id(), my_warp_id, my_lane_id);
      typename MM0::Mma::FragmentC accum;
      accum.clear();
      auto gemm_k_iterations =
          (problem_size_0_k + MM0::Mma::Shape::kK - 1) / MM0::Mma::Shape::kK;
      // Compute threadblock-scoped matrix multiply-add
      mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
      __syncthreads();
      if (kPreloadV) {
        prologueV(0);
      } else {
        MM1::Mma::drain_cp_asyncs();
      }
      typename MM0::Mma::Operator::IteratorC::TensorCoord
          iteratorC_tile_offset = {
              (tb_tile_offset.m() * MM0::Mma::WarpCount::kM) +
                  (my_warp_id % MM0::Mma::WarpCount::kM),
              (tb_tile_offset.n() * MM0::Mma::WarpCount::kN) +
                  (my_warp_id / MM0::Mma::WarpCount::kM)};
      // multiply by scaling factor
      // (when bias is unsupported, scaling is folded into iterative_softmax
      // below instead - see the `kSupportsBias ? 1.0f : p.scale` argument)
      if (kSupportsBias) {
        accum =
            cutlass::multiplies<typename MM0::Mma::FragmentC>()(p.scale, accum);
      }
      // apply attention bias if applicable
      if (kSupportsBias && p.attn_bias_ptr != nullptr) {
        // load bias tile Bij into shared memory
        typename MM0::BiasLoader::GmemTileIterator bias_iter(
            {cutlass::layout::RowMajor(p.bias_strideM)},
            // attn_bias_pointer points to matrix of size (n_queries, n_keys)
            // for the relevant batch_id and head_id
            p.attn_bias_ptr + query_start * p.bias_strideM + iter_key_start,
            {problem_size_0_m, problem_size_0_n},
            thread_id());
        cutlass::TensorRef<scalar_t, cutlass::layout::RowMajor> bias_tensor_ref(
            shared_storage.after_mm0.bias.data(),
            cutlass::layout::RowMajor(MM0::ThreadblockShape::kN));
        typename MM0::BiasLoader::SmemTileIterator smem_tile_iter(
            bias_tensor_ref, thread_id());
        MM0::BiasLoader::load(bias_iter, smem_tile_iter);
        // Pij += Bij, Pij is in register fragment and Bij is in shared memory
        auto lane_offset = MM0::AccumLambdaIterator::get_lane_offset(
            my_lane_id, my_warp_id, iteratorC_tile_offset);
        MM0::AccumLambdaIterator::iterateRows(
            lane_offset,
            [&](int accum_m) {},
            [&](int accum_m, int accum_n, int idx) {
              if (accum_m < problem_size_0_m && accum_n < problem_size_0_n) {
                accum[idx] += bias_tensor_ref.at({accum_m, accum_n});
              }
            },
            [&](int accum_m) {});
      }
      // Mask out last if causal
      // This is only needed if upper-right corner of current query / key block
      // intersects the mask Coordinates of upper-right corner of current block
      // is y=query_start x=min(iter_key_start + kKeysPerBlock, num_keys)) The
      // first masked element is x = y + offset -> query_start + offset There is
      // intersection (and we need to mask) if min(iter_key_start +
      // kKeysPerBlock, num_keys)) >= query_start + offset
      if (p.custom_mask_type &&
          cutlass::fast_min(iter_key_start + kKeysPerBlock, p.num_keys) >=
              (query_start + p.causal_diagonal_offset)) {
        // NOTE: this local `query_start` shadows the outer uint32_t variable
        // of the same name (same value, different deduced type).
        auto query_start = blockIdx.x * kQueriesPerBlock;
        auto lane_offset = MM0::AccumLambdaIterator::get_lane_offset(
            my_lane_id, my_warp_id, iteratorC_tile_offset);
        int32_t last_col;
        MM0::AccumLambdaIterator::iterateRows(
            lane_offset,
            [&](int accum_m) {
              // last absolute col is (last absolute query + offset)
              // last local col is (last absolute query + offset -
              // iter_key_start)
              last_col = query_start + accum_m + p.causal_diagonal_offset -
                  iter_key_start;
            },
            [&](int accum_m, int accum_n, int idx) {
              if (accum_n > last_col) {
                accum[idx] =
                    -cutlass::platform::numeric_limits<accum_t>::infinity();
              }
            },
            [&](int accum_m) {});
      }
      // Update `mi` from accum stored in registers
      // Also does accum[i] <- exp(accum[i] - mi)
      iterative_softmax<typename MM0::Mma::Operator::IteratorC>(
          accum_o,
          accum,
          mi,
          m_prime,
          s_prime,
          out_rescale,
          shared_storage.addition_storage,
          my_lane_id,
          thread_id(),
          my_warp_id,
          p.num_keys - iter_key_start,
          iter_key_start == 0,
          iteratorC_tile_offset,
          kSupportsBias ? 1.0f : p.scale);
      // Output results to shared-memory
      int warp_idx_mn_0 = my_warp_id %
          (MM0::Mma::Base::WarpCount::kM * MM0::Mma::Base::WarpCount::kN);
      auto output_tile_coords = cutlass::MatrixCoord{
          warp_idx_mn_0 % MM0::Mma::Base::WarpCount::kM,
          warp_idx_mn_0 / MM0::Mma::Base::WarpCount::kM};
      MM0::B2bGemm::accumToSmem(
          shared_storage.after_mm0.si, accum, my_lane_id, output_tile_coords);
      __syncthreads();
#ifdef HAS_PYTORCH
      // apply dropout (if applicable) after we've written Pij to smem.
      // dropout is applied by multiplying each element of Pij by:
      // - 0 with probability dropout_p
      // - 1 / (1 - dropout_p) with probability 1 - dropout_p
      //
      // for backward purposes we want to be able to map each element of the
      // attention matrix to the same random uniform number as the one we used
      // in forward, without needing to use the same iteration order or having
      // to store the dropout matrix. its possible to do this in registers but
      // it ends up being very slow because each thread having noncontiguous
      // strips of the Pij tile means we have to skip around a lot, and also
      // have to generate a single random number at a time
      if (kSupportsDropout && p.use_dropout) {
        auto si = shared_storage.after_mm0.si.accum_ref();
        // each thread handles a contiguous sequence of elements from Sij, all
        // coming from the same row. the reason they have to come from the same
        // row is that the sampling random numbers from a contiguous random
        // number sequence is much more efficient than jumping around, and the
        // linear offset of each element of S (the global matrix) maps to an
        // offset in a random number sequence. for S, the end of a row and the
        // beginning of the next have adjacent offsets, but for Sij, this is not
        // necessarily the case.
        const int num_threads = blockDim.x * blockDim.y * blockDim.z;
        const int threads_per_row =
            cutlass::fast_min(num_threads / problem_size_0_m, problem_size_0_n);
        const int elts_per_thread = cutlass::round_nearest(
            cutlass::ceil_div(problem_size_0_n, threads_per_row), 4);
        const int thread_i = thread_id() / threads_per_row;
        const int thread_start_j =
            (thread_id() % threads_per_row) * elts_per_thread;
        if (thread_i < problem_size_0_m && thread_start_j < problem_size_0_n) {
          curandStatePhilox4_32_10_t curand_state = curand_state_init;
          skipahead(
              static_cast<unsigned long long>(
                  (query_start + thread_i) * p.num_keys_absolute +
                  (iter_key_start + thread_start_j)),
              &curand_state);
          const float dropout_scale = 1.0 / (1.0 - p.dropout_prob);
          // apply dropout scaling to elements this thread is responsible for,
          // in chunks of 4
          for (int sij_start_col_idx = thread_start_j; sij_start_col_idx <
               cutlass::fast_min(thread_start_j + elts_per_thread,
                                 problem_size_0_n);
               sij_start_col_idx += 4) {
            const float4 rand_uniform_quad = curand_uniform4(&curand_state);
            CUTLASS_PRAGMA_UNROLL
            for (int quad_idx = 0; quad_idx < 4; ++quad_idx) {
              si.at({thread_i, sij_start_col_idx + quad_idx}) *=
                  static_cast<scalar_t>(
                      dropout_scale *
                      ((&rand_uniform_quad.x)[quad_idx] > p.dropout_prob));
            }
          }
        }
        __syncthreads(); // p.use_dropout should have same value kernel-wide
      }
#endif
      //
      // MATMUL: Attn . V
      // Run the matmul `attn @ V` for a block of attn and V.
      // `attn` is read from shared memory (in `shared_storage_si`)
      // `V` is read from global memory (with iterator_B)
      //
      const int64_t nBlockN = kSingleValueIteration
          ? 1
          : ceil_div(
                (int64_t)problem_size_1_n, int64_t(MM1::ThreadblockShape::kN));
      for (int blockN = 0; blockN < nBlockN; ++blockN) {
        int gemm_k_iterations =
            (problem_size_1_k + MM1::Mma::Shape::kK - 1) / MM1::Mma::Shape::kK;
        // Compute threadblock-scoped matrix multiply-add and store it in accum
        // (in registers)
        if (!kPreloadV) {
          __syncthreads(); // we share shmem between mma and epilogue
        }
        typename MM1::Mma::IteratorB iterator_V(
            typename MM1::IteratorB::Params{typename MM1::LayoutB(p.v_strideM)},
            p.value_ptr + iter_key_start * p.v_strideM,
            {problem_size_1_k, problem_size_1_n},
            thread_id(),
            cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN});
        typename MM1::Mma mma_pv(
            // operand A: Pij_dropped in shared memory
            shared_storage.after_mm0.si.accum_ref(),
            // operand B: shared memory staging area for Vj, which is loaded
            // from global memory
            shared_storage.after_mm0.mm1.operand_B_ref(),
            (int)thread_id(),
            (int)my_warp_id,
            (int)my_lane_id);
        mma_pv.set_prologue_done(kPreloadV);
        if (!kKeepOutputInRF) {
          accum_o.clear();
        }
        mma_pv(gemm_k_iterations, accum_o, iterator_V, accum_o);
        __syncthreads();
        // Overlap the next V-tile load with the epilogue below.
        if (kPreloadV && !kSingleValueIteration && blockN + 1 < nBlockN) {
          prologueV(blockN + 1);
        }
        if (!kKeepOutputInRF) {
          MM1::Mma::drain_cp_asyncs();
          DISPATCH_BOOL(
              iter_key_start == 0, kIsFirst, ([&] {
                DISPATCH_BOOL(
                    (iter_key_start + kKeysPerBlock) >= p.num_keys,
                    kIsLast,
                    ([&] {
                      // On the last key block we write the final output type;
                      // otherwise we accumulate into output_accum_t.
                      using DefaultEpilogue = typename MM1::DefaultEpilogue;
                      using DefaultOp =
                          typename MM1::DefaultConfig::EpilogueOutputOp;
                      using ElementCompute = typename DefaultOp::ElementCompute;
                      using EpilogueOutputOp = typename cutlass::epilogue::
                          thread::MemoryEfficientAttentionNormalize<
                              typename cutlass::platform::conditional<
                                  kIsLast,
                                  output_t,
                                  output_accum_t>::type,
                              output_accum_t,
                              DefaultOp::kCount,
                              typename DefaultOp::ElementAccumulator,
                              ElementCompute,
                              kIsFirst,
                              kIsLast,
                              cutlass::Array<ElementCompute, kQueriesPerBlock>>;
                      using Epilogue = typename cutlass::epilogue::threadblock::
                          EpiloguePipelined<
                              typename DefaultEpilogue::Shape,
                              typename MM1::Mma::Operator,
                              DefaultEpilogue::kPartitionsK,
                              typename cutlass::platform::conditional<
                                  kIsLast,
                                  typename MM1::OutputTileIterator,
                                  typename MM1::OutputTileIteratorAccum>::type,
                              typename DefaultEpilogue::
                                  AccumulatorFragmentIterator,
                              typename DefaultEpilogue::WarpTileIterator,
                              typename DefaultEpilogue::SharedLoadIterator,
                              EpilogueOutputOp,
                              typename DefaultEpilogue::Padding,
                              DefaultEpilogue::kFragmentsPerIteration,
                              true, // IterationsUnroll
                              typename MM1::OutputTileIteratorAccum // Read
                                                                    // iterator
                              >;
                      int col = blockN * MM1::Mma::Shape::kN;
                      auto source_iter = createOutputAccumIter(col);
                      auto dest_iter = call_conditional<
                          kIsLast,
                          decltype(createOutputIter),
                          decltype(createOutputAccumIter)>::
                          apply(createOutputIter, createOutputAccumIter, col);
                      EpilogueOutputOp rescale(s_prime, out_rescale);
                      Epilogue epilogue(
                          shared_storage.epilogue_shared_storage(),
                          thread_id(),
                          my_warp_id,
                          my_lane_id);
                      epilogue(rescale, dest_iter, accum_o, source_iter);
                    }));
              }));
          if (!kSingleValueIteration) {
            __syncthreads();
          }
        }
      }
      __syncthreads(); // we modify `m_prime` after
    }
    // When the output stayed in registers, run the epilogue exactly once now.
    if (kKeepOutputInRF) {
      constexpr bool kIsFirst = true;
      constexpr bool kIsLast = true;
      using DefaultEpilogue = typename MM1::DefaultEpilogue;
      using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp;
      using ElementCompute = typename DefaultOp::ElementCompute;
      using EpilogueOutputOp =
          typename cutlass::epilogue::thread::MemoryEfficientAttentionNormalize<
              output_t, // output
              output_accum_t, // source
              DefaultOp::kCount,
              typename DefaultOp::ElementAccumulator, // accum
              output_accum_t, // compute
              kIsFirst,
              kIsLast,
              cutlass::Array<ElementCompute, kQueriesPerBlock>>;
      using Epilogue =
          typename cutlass::epilogue::threadblock::EpiloguePipelined<
              typename DefaultEpilogue::Shape,
              typename MM1::Mma::Operator,
              DefaultEpilogue::kPartitionsK,
              typename MM1::OutputTileIterator, // destination
              typename DefaultEpilogue::AccumulatorFragmentIterator,
              typename DefaultEpilogue::WarpTileIterator,
              typename DefaultEpilogue::SharedLoadIterator,
              EpilogueOutputOp,
              typename DefaultEpilogue::Padding,
              DefaultEpilogue::kFragmentsPerIteration,
              true, // IterationsUnroll
              typename MM1::OutputTileIteratorAccum // source tile
              >;
      auto dest_iter = createOutputIter(0);
      EpilogueOutputOp rescale(s_prime, out_rescale);
      Epilogue epilogue(
          shared_storage.epilogue_shared_storage(),
          thread_id(),
          warp_id(),
          lane_id());
      MM1::Mma::drain_cp_asyncs();
      epilogue(rescale, dest_iter, accum_o);
    }
    // 7. Calculate logsumexp
    // To make the backward easier, we pad logsumexp with `inf`
    // this avoids a few bound checks, and is not more expensive during fwd
    static_assert(kQueriesPerBlock < kNumWarpsPerBlock * kWarpSize, "");
    if (p.logsumexp_ptr && thread_id() < kQueriesPerBlock) {
      auto lse_dim = ceil_div((int32_t)p.num_queries, kAlignLSE) * kAlignLSE;
      constexpr float kLog2e = 1.4426950408889634074; // log_2(e) = M_LOG2E
      // `mi` is in base-2 units (see iterative_softmax), so divide by log2(e)
      // to convert back to natural log before adding log(s_prime).
      if (thread_id() < p.num_queries) {
        p.logsumexp_ptr[thread_id()] = accum_t(mi[thread_id()] / kLog2e) +
            cutlass::fast_log(accum_t(s_prime[thread_id()]));
      } else if (thread_id() < lse_dim) {
        p.logsumexp_ptr[thread_id()] =
            cutlass::platform::numeric_limits<accum_t>::infinity();
      }
    }
  }
  // Online-softmax update for one freshly computed S tile held in registers.
  // The fragment is first scaled by `scaling * log2(e)` so that exp2f can be
  // used instead of expf throughout (mi/m_prime therefore live in base-2
  // units). frag_o (the running output accumulator) is rescaled in place
  // when kKeepOutputInRF is set; columns >= max_col are masked out.
  template <typename WarpIteratorC>
  CUTLASS_DEVICE static void iterative_softmax(
      typename WarpIteratorC::Fragment& frag_o, // output so far
      typename WarpIteratorC::Fragment& frag,
      cutlass::Array<accum_t, kQueriesPerBlock>& mi,
      cutlass::Array<accum_t, kQueriesPerBlock>& m_prime,
      cutlass::Array<accum_t, kQueriesPerBlock>& s_prime,
      cutlass::Array<accum_t, kQueriesPerBlock>& out_rescale,
      cutlass::Array<accum_t, kQueriesPerBlock * MM0::MmaCore::WarpCount::kN>&
          addition_storage,
      int8_t lane_id,
      int8_t thread_id,
      int8_t warp_id,
      int max_col, // number of valid key columns in this tile
      bool is_first, // true on the first key block (nothing to rescale yet)
      typename WarpIteratorC::TensorCoord const& tile_offset,
      float scaling) {
    /* Iterates on the accumulator and corresponding position on result matrix
    (1) Update `mi[r]` to the max value of the row `r`
    (2) In a second iteration do the following:
        (a) accum   <- exp(accum - mi)
        (b) m_prime <- exp(m_prime - mi)
        (c) s_prime <- s_prime * m_prime + sum(accum)
    All of this is done on registers, before we store all of this
    on shared memory for the next matmul with Value.
    */
    using Fragment = typename WarpIteratorC::Fragment;
    using LambdaIterator = typename DefaultMmaAccumLambdaIterator<
        WarpIteratorC,
        accum_t,
        kWarpSize>::Iterator;
    // Convert to `accum_t` (rather than double)
    constexpr float kLog2e = 1.4426950408889634074; // log_2(e) = M_LOG2E
    static_assert(kQueriesPerBlock % kNumWarpsPerBlock == 0, "");
    static constexpr int kLinesPerWarp = kQueriesPerBlock / kNumWarpsPerBlock;
    frag = cutlass::multiplies<Fragment>()(scaling * kLog2e, frag);
    auto lane_offset =
        LambdaIterator::get_lane_offset(lane_id, warp_id, tile_offset);
    // First update `mi` to the max per-row
    {
      accum_t max;
      LambdaIterator::iterateRows(
          lane_offset,
          [&](int accum_m) {
            max = -cutlass::platform::numeric_limits<accum_t>::infinity();
          },
          [&](int accum_m, int accum_n, int idx) {
            if (accum_n < max_col) {
              max = cutlass::fast_max(max, frag[idx]);
            }
          },
          [&](int accum_m) {
            // Having 4x atomicMax seems faster than reduce within warp
            // first...
            atomicMaxFloat(&mi[accum_m], max);
          });
    }
    // Make sure we all share the update values for `mi`
    __syncthreads();
    // Doing this `exp` is quite expensive. Let's
    // split it across the warps
    bool restore_mi_to_minus_inf = false;
    if (lane_id < kLinesPerWarp) {
      int id = warp_id * kLinesPerWarp + lane_id;
      auto m_prime_id = m_prime[id];
      auto mi_id = mi[id];
      bool changed = m_prime_id < mi_id; // `false` if both are -inf
      if (changed) {
        auto m_prime_exp = exp2f(m_prime_id - mi_id);
        out_rescale[id] = m_prime_exp;
        s_prime[id] *= m_prime_exp;
      } else {
        // Only when bias is enabled, it's possible that all the first values
        // of attention are masked to `-inf`. In that case we want to avoid
        // `nan = exp2f(-inf - (-inf))` so we temporarily set `mi` to 0
        if (kSupportsBias &&
            mi_id == -cutlass::platform::numeric_limits<accum_t>::infinity()) {
          restore_mi_to_minus_inf = true;
          mi[id] = 0.0f;
        }
        out_rescale[id] = 1.0f;
      }
    }
    __syncthreads(); // Update output fragments
    // Rescale the previously accumulated output by exp2(m_prime - mi).
    if (kKeepOutputInRF && !is_first) {
      accum_t line_rescale;
      LambdaIterator::iterateRows(
          lane_offset,
          [&](int accum_m) { line_rescale = out_rescale[accum_m]; },
          [&](int accum_m, int accum_n, int idx) {
            frag_o[idx] = frag_o[idx] * line_rescale;
          },
          [&](int accum_m) {});
    }
    // Update accum_m, accum_n, ...
    {
      accum_t mi_row, total_row;
      LambdaIterator::iterateRows(
          lane_offset,
          [&](int accum_m) { mi_row = mi[accum_m]; },
          [&](int accum_m, int accum_n, int idx) {
            frag[idx] =
                (accum_n < max_col) ? exp2f(frag[idx] - mi_row) : accum_t(0.0);
          },
          [&](int accum_m) {});
      LambdaIterator::iterateRows(
          lane_offset,
          [&](int accum_m) { total_row = 0.0; },
          [&](int accum_m, int accum_n, int idx) { total_row += frag[idx]; },
          [&](int accum_m) {
            if (LambdaIterator::reduceSameRow(
                    lane_id, total_row, [](accum_t a, accum_t b) {
                      return a + b;
                    })) {
              // NOTE: we could atomically add `total_row` to `s_prime`, but
              // it's faster (and deterministic) to avoid atomics here
              addition_storage
                  [accum_m + kQueriesPerBlock * tile_offset.column()] =
                      total_row;
            }
          });
    }
    __syncthreads();
    // Final cross-warp reduction of the row sums into s_prime; also restore
    // or advance `mi`/`m_prime` for the next key block.
    if (lane_id < kLinesPerWarp) {
      int id = warp_id * kLinesPerWarp + lane_id;
      accum_t total_row = s_prime[id];
      if (restore_mi_to_minus_inf) {
        // Restore `mi`, see above when we set `restore_mi_to_minus_inf=true`
        mi[id] = -cutlass::platform::numeric_limits<accum_t>::infinity();
      } else {
        m_prime[id] = mi[id];
      }
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < MM0::MmaCore::WarpCount::kN; ++i) {
        total_row += addition_storage[id + kQueriesPerBlock * i];
      }
      s_prime[id] = total_row;
    }
  }
static CUTLASS_DEVICE int8_t lane_id() {
return threadIdx.x;
}
static CUTLASS_DEVICE int8_t warp_id() {
return threadIdx.y;
}
static CUTLASS_DEVICE int16_t thread_id() {
return threadIdx.x + threadIdx.y * blockDim.x;
}
};
// Kernel entry point: each CTA positions its Params at the block it owns
// (advance_to_block), then runs the attention kernel; CTAs with no assigned
// work exit immediately.
template <typename AK>
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
    attention_kernel_batched_impl(typename AK::Params p) {
  if (p.advance_to_block()) {
    AK::attention_kernel(p);
  }
}
// Declaration only - the corresponding definition/instantiation is provided
// elsewhere.
template <typename AK>
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
    attention_kernel_batched(typename AK::Params params);
| cutlass/examples/41_fused_multi_head_attention/kernel_forward.h/0 | {
"file_path": "cutlass/examples/41_fused_multi_head_attention/kernel_forward.h",
"repo_id": "cutlass",
"token_count": 24041
} | 7 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
class gen_build_sys:
    """Emits a top-level CMakeLists.txt that builds the generated
    multi-GEMM sample against a CUTLASS checkout.

    The generated build file adds the CUTLASS `include/` and
    `tools/util/include/` directories to the include path, configures
    per-architecture CUDA flags, and declares the `sample` executable.
    """

    def __init__(self, cutlass_deps_dir, output_dir = "../"):
        """
        Args:
            cutlass_deps_dir: path to the CUTLASS repository root; spliced
                into the generated CUTLASS_PATH / CUTLASS_UTIL_PATH settings.
            output_dir: directory the CMakeLists.txt is written into.
        """
        self.output_dir = output_dir
        self.cutlass_deps_dir = cutlass_deps_dir

    def gen_top(self):
        """Return the complete CMakeLists.txt contents as a string."""
        code = ""
        # This first chunk goes through str.format, so literal CMake braces
        # are escaped as {{ }} and only {cutlass_deps_dir} is substituted.
        code += '''\
# Auto Generated code - Do not edit.

cmake_minimum_required(VERSION 3.8)
project(CUTLASS_MULTI_GEMMS LANGUAGES CXX CUDA)
find_package(CUDAToolkit)
set(CUDA_PATH ${{CUDA_TOOLKIT_ROOT_DIR}})
set(CUTLASS_PATH \"{cutlass_deps_dir}/include\")
set(CUTLASS_UTIL_PATH \"{cutlass_deps_dir}/tools/util/include\")
list(APPEND CMAKE_MODULE_PATH ${{CUDAToolkit_LIBRARY_DIR}})
'''.format(cutlass_deps_dir=self.cutlass_deps_dir)

        # The remaining chunks are appended verbatim (no .format), so CMake
        # variables keep their single-brace ${VAR} spelling.
        # NOTE(review): the generated foreach below tests ${SM}, not the loop
        # variable ${arch}; it appears to rely on the caller defining SM
        # (e.g. via -DSM=...) - confirm, or it may need to test ${arch}.
        code += '''\

set(GPU_ARCHS \"\" CACHE STRING
  \"List of GPU architectures (semicolon-separated) to be compiled for.\")

if(\"${GPU_ARCHS}\" STREQUAL \"\")
  set(GPU_ARCHS \"70\")
endif()

foreach(arch ${GPU_ARCHS})
  set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -gencode arch=compute_${arch},code=sm_${arch}\")
  if(SM STREQUAL 70 OR SM STREQUAL 75)
    set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -DWMMA\")
    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -DWMMA\")
    set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -DWMMA\")
  endif()
endforeach()

set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS}\")
set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler -Wall\")

set(CMAKE_C_FLAGS_DEBUG \"${CMAKE_C_FLAGS_DEBUG} -Wall -O0\")
set(CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG} -Wall -O0\")
set(CMAKE_CUDA_FLAGS_DEBUG \"${CMAKE_CUDA_FLAGS_DEBUG} -O0 -G -Xcompiler -Wall\")

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

if(CMAKE_CXX_STANDARD STREQUAL \"11\")
  set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} --expt-extended-lambda\")
  set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr\")
endif()

set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -g -O3\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler -O3\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler=-fno-strict-aliasing\")

set(COMMON_HEADER_DIRS
  ${PROJECT_SOURCE_DIR}
  ${CUDAToolkit_INCLUDE_DIRS}
)

set(COMMON_LIB_DIRS
  ${CUDAToolkit_LIBRARY_DIR}
)
list(APPEND COMMON_HEADER_DIRS ${CUTLASS_PATH})
list(APPEND COMMON_HEADER_DIRS ${CUTLASS_UTIL_PATH})
'''

        code += '''\

include_directories(
  ${COMMON_HEADER_DIRS}
)

link_directories(
  ${COMMON_LIB_DIRS}
)

add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
add_definitions(-DGOOGLE_CUDA=1)

add_executable(sample
  sample/sample.cu
  one_api.cu
)

target_link_libraries(sample PRIVATE
  -lcudart
  -lnvToolsExt
  ${CMAKE_THREAD_LIBS_INIT}
)

if(NOT DEFINED LIB_INSTALL_PATH)
  set(LIB_INSTALL_PATH ${CMAKE_CURRENT_BINARY_DIR})
endif()
'''
        return code

    def gen_code(self):
        """Write the generated CMakeLists.txt into self.output_dir.

        Uses os.path.join so output_dir works whether or not it carries a
        trailing path separator (the previous string concatenation required
        one).
        """
        import os
        top_code = self.gen_top()
        out_path = os.path.join(self.output_dir, "CMakeLists.txt")
        with open(out_path, "w") as f:
            f.write(top_code)
| cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_cmake.py/0 | {
"file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_cmake.py",
"repo_id": "cutlass",
"token_count": 1880
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Dual-GEMM Example.
Fused kernel that outputs `D0` and `D1`.
We assume that B0/B1 have the same shape/layout
```
D0 = epilogue0(X @ B0, C0)
D1 = epilogue1(X @ B1, C1)
D2 = element_wise(D0, D1)
```
D0 and D1 will be optionally stored in gmem (`kStoreD0` / `kStoreD1`)
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "device/dual_gemm.h"
#include "thread/left_silu_and_mul.h"
#include "dual_gemm_run.h"
#include "test_run.h"
////////////////////////////////////////////////////////////////////////////////
// Default (non-batched) GEMM problem extents (M, N, K).
cutlass::gemm::GemmCoord problem_size(4096, 4096, 8192);
// Smaller per-batch problem extents used by the batched test cases.
cutlass::gemm::GemmCoord batch_problem_size(321, 256, 512);
// Number of software pipeline stages in the mainloop.
constexpr int kStages = 3;
// Whether split-K with serial reduction is enabled.
constexpr bool kSplitKSerial = false;
// When true, C0/C1 participate as bias terms in the epilogues (beta = 1).
constexpr bool kUseBias = true;
// Batch count used by the batched test cases.
constexpr int kBatchCount = 37;
// Flip this preprocessor switch to run the example in bf16 instead of fp16.
#if 0
using ElementOperandA = cutlass::bfloat16_t;
using ElementOperandB = cutlass::bfloat16_t;
using ElementOutput = cutlass::bfloat16_t;
using ElementAccumulator = float;
using ElementCompute = float;
#else
using ElementOperandA = cutlass::half_t;
using ElementOperandB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
#endif
// Scaling mode for the two linear-combination epilogues:
// with bias, beta is applied without scaling; without bias, scaling is only
// needed when serial split-K accumulates partial results.
constexpr auto kScaleType = kUseBias ? cutlass::epilogue::thread::ScaleType::NoBetaScaling : (
  // No bias
  kSplitKSerial ? cutlass::epilogue::thread::ScaleType::Default : cutlass::epilogue::thread::ScaleType::Nothing
);
// Epilogue of the first GEMM: D0 = epilogue0(X @ B0, C0).
using EpilogueOutputOp0 = cutlass::epilogue::thread::LinearCombination<
  ElementOutput,
  128 / cutlass::sizeof_bits<ElementOutput>::value,
  ElementAccumulator,
  ElementCompute,
  kScaleType
>;
// Epilogue of the second GEMM: D1 = epilogue1(X @ B1, C1).
using EpilogueOutputOp1 = cutlass::epilogue::thread::LinearCombination<
  ElementOutput,
  128 / cutlass::sizeof_bits<ElementOutput>::value,
  ElementAccumulator,
  ElementCompute,
  kScaleType
>;
// Element-wise epilogue combining both results: D2 = silu(D0) * D1.
using EpilogueOutputOp2 = cutlass::epilogue::thread::LeftSiLUAndMul<
  ElementOutput,
  128 / cutlass::sizeof_bits<ElementOutput>::value,
  ElementOutput,
  ElementCompute
>;
// Epilogue scalars; beta is 1 only when a bias term is added.
const ElementCompute alpha0 = ElementCompute(1);
const ElementCompute beta0 = ElementCompute(kUseBias ? 1 : 0);
const ElementCompute alpha1 = ElementCompute(1);
const ElementCompute beta1 = ElementCompute(kUseBias ? 1 : 0);
bool run_nonfused_gemm_f16_sm80() {
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Gemm0 = cutlass::gemm::device::Gemm<
ElementOperandA,
cutlass::layout::RowMajor,
ElementOperandB,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp0,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
kStages,
8,
8,
kSplitKSerial
>;
using Gemm1 = cutlass::gemm::device::Gemm<
ElementOperandA,
cutlass::layout::RowMajor,
ElementOperandB,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp1,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
kStages,
8,
8,
kSplitKSerial
>;
NonFusedDualGemmRun<Gemm0, Gemm1> nonFusedGemm;
std::cout << "Running Non-fused GEMMs FP16 TN GEMMs...\n";
bool pass = nonFusedGemm.run(
problem_size,
alpha0,
beta0,
alpha1,
beta1,
true /* is_profiling */
);
if(pass)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return pass;
}
// Host-side functor computing silu(lhs) * rhs, for both scalar and
// cutlass::Array operands. Mirrors the device-side epilogue operation so
// results can be checked on the host.
template <typename T>
struct LeftSiLUAndMul {
  struct Params{};

  CUTLASS_HOST_DEVICE LeftSiLUAndMul(Params) {}
  CUTLASS_HOST_DEVICE void set_k_partition(int, int) {}

  // Scalar form.
  CUTLASS_HOST_DEVICE T operator() (
      T const &lhs,
      T const &rhs) const {
    cutlass::epilogue::thread::SiLu<T> activation;
    cutlass::multiplies<T> product;
    return product(activation(lhs), rhs);
  }

  // Vectorized form over fixed-size arrays.
  template <int kCount>
  CUTLASS_HOST_DEVICE cutlass::Array<T, kCount> operator() (
      cutlass::Array<T, kCount> const &lhs,
      cutlass::Array<T, kCount> const &rhs) const {
    cutlass::epilogue::thread::SiLu<T> activation;
    cutlass::multiplies<T> product;
    return product(activation(lhs), rhs);
  }
};
bool run_fused_gemm_f16_sm80_shmem() {
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
// Optionally, we might not need intermediate GEMM outputs
constexpr bool kStoreD0 = true;
constexpr bool kStoreD1 = true;
using DualGemm = cutlass::gemm::device::DualGemm<
ElementOperandA,
cutlass::layout::RowMajor,
ElementOperandB,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
EpilogueOutputOp2,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
kStages,
kStoreD0,
kStoreD1,
kSplitKSerial
>;
DualFusedGemmRun<DualGemm> fusedGemm;
std::cout << "Running Fused FP16 TN GEMMs + Epilogue2...\n";
bool passed = fusedGemm.run(
problem_size,
alpha0,
beta0,
alpha1,
beta1
);
if(passed)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return passed;
}
bool run_batched_fused_gemm_f16_sm80_shmem() {
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
// Optionally, we might not need intermediate GEMM outputs
constexpr bool kStoreD0 = true;
constexpr bool kStoreD1 = true;
using DualGemm = cutlass::gemm::device::DualGemm<
ElementOperandA,
cutlass::layout::RowMajor,
ElementOperandB,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
EpilogueOutputOp2,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
kStages,
kStoreD0,
kStoreD1,
kSplitKSerial
>;
DualFusedGemmRun<DualGemm> fusedGemm;
std::cout << "Running Batched Fused FP16 TN GEMMs + Epilogue2...\n";
bool passed = fusedGemm.run(
batch_problem_size,
alpha0,
beta0,
alpha1,
beta1,
kBatchCount,
false, /* broadcast_b1 */
false /* is_profiling */
);
if(passed)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return passed;
}
bool run_broadcast_fused_gemm_f16_sm80_shmem() {
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
// Optionally, we might not need intermediate GEMM outputs
constexpr bool kStoreD0 = true;
constexpr bool kStoreD1 = true;
using DualGemm = cutlass::gemm::device::DualGemm<
ElementOperandA,
cutlass::layout::RowMajor,
ElementOperandB,
// different LayoutB0 and B1
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
EpilogueOutputOp2,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
kStages,
kStoreD0,
kStoreD1,
kSplitKSerial
>;
DualFusedGemmRun<DualGemm> fusedGemm;
std::cout << "Running Broadcast Fused FP16 TN GEMMs + Epilogue2...\n";
bool passed = fusedGemm.run(
problem_size,
alpha0,
beta0,
alpha1,
beta1,
1, /* batch_count */
true, /* broadcast_b1 */
true /* is_profiling */
);
if(passed)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return passed;
}
bool run_batched_broadcast_fused_gemm_f16_sm80_shmem() {
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
// Optionally, we might not need intermediate GEMM outputs
constexpr bool kStoreD0 = true;
constexpr bool kStoreD1 = true;
using DualGemm = cutlass::gemm::device::DualGemm<
ElementOperandA,
cutlass::layout::RowMajor,
ElementOperandB,
// different LayoutB0 and B1
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
EpilogueOutputOp2,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
kStages,
kStoreD0,
kStoreD1,
kSplitKSerial
>;
DualFusedGemmRun<DualGemm> fusedGemm;
std::cout << "Running Batch Broadcast Fused FP16 TN GEMMs + Epilogue2...\n";
bool passed = fusedGemm.run(
batch_problem_size,
alpha0,
beta0,
alpha1,
beta1,
kBatchCount,
true, /* broadcast_b1 */
false /* is_profiling */
);
if(passed)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return passed;
}
int main() {
std::vector<bool (*)()>funcs = {
&run_nonfused_gemm_f16_sm80,
&run_fused_gemm_f16_sm80_shmem,
&run_batched_fused_gemm_f16_sm80_shmem,
&run_broadcast_fused_gemm_f16_sm80_shmem,
&run_batched_broadcast_fused_gemm_f16_sm80_shmem
};
std::string test_name = (
"dual-gemm f16 bias=" +
std::to_string(kUseBias) +
" split_k_serial=" +
std::to_string(kSplitKSerial) +
" batch_count=" +
std::to_string(kBatchCount)
);
return testRun(80, funcs, test_name);
}
////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/45_dual_gemm/dual_gemm.cu/0 | {
"file_path": "cutlass/examples/45_dual_gemm/dual_gemm.cu",
"repo_id": "cutlass",
"token_count": 4897
} | 9 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Hopper GEMM example leveraging collective operation builders.
This example showcases the use of CUTLASS's CollectiveBuilder to easily construct performant kernels
targeting the NVIDIA Hopper architecture.
Background and motivation
-------------------------
CUTLASS kernels are highly parameterizable via template parameters. To ease the selection of template
parameters, CUTLASS 2 leveraged DefaultGemmConfigurations. Given a small set of parameters, such as
the data types of operands and the compute capability of the GPU, DefaultGemmConfigurations defined sensible
defaults for the many other parameters to the kernel (e.g., warp shape, stage count).
However, DefaultGemmConfigurations leave multiple opportunities for improvement, which are addressed
in CUTLASS 3:
(1) DefaultGemmConfigurations do not allow one to use a more-performant set of parameters without
specifying every parameter. For example, the DefaultGemmConfigurations for GEMMs targeting
Ampere specify that three pipeline stages should be used regardless of the sizes of operands.
If one wished to increase this value, one would also need to specify all other template parameters.
This leaves a gap between a high-level ease-of-use interface and a lower-level detailed interface.
(2) A new DefaultGemmConfiguration was required for each combination of operand types, GPU architecture,
and operation type (e.g., Tensor Core or SIMT). This led to increased code size to cover each unique
configuration and a lack of extensibility from one DefaultGemmConfiguration to another.
Alongside these opportunities for improvement, the Hopper architecture offers new features that increase
the number of valid configurations of a kernel. In addition to the many template parameters already available
in CUTLASS 2 kernels, CUTLASS 3 kernels targeting Hopper also have various scheduling modes to select from that control:
(1) how data is to be loaded (e.g., using the Hopper TMA feature or Ampere cp.async)
(2) how work is to be divided among warps in a thread block (e.g., whether to use "warp specialization")
(3) whether persistent thread blocks should be used
This increased configuration space further motivates rethinking DefaultGemmConfigurations.
Introduction to the CollectiveBuilder
-------------------------------------
CUTLASS 3 introduces the CollectiveBuilder to further ease the process of selecting template parameters
for kernels targeting Hopper. Similar to the DefaultGemmConfigurations used in CUTLASS 2, the CollectiveBuilder
takes in a small set of template parameters (e.g., the data types of operands A and B). It then automatically
determines the data loading strategy to use depending on whether the Hopper TMA feature can be used with the provided
parameters. If one does not indicate a particular scheduling policy or stage count to use (by using `Auto` template
parameters), the CollectiveBuilder will also automatically select these.
Unlike DefaultGemmConfigurations a partial specialization of the CollectiveBuilder is not needed for many
configurations of operand types. Instead the CollectiveBuilder "builds" a configuration based on generic
properties of the specified operands, layouts, and other parameters. For example, when the stage count
is set to `Auto`, the CollectiveBuilder may automatically calculate the maximum number of stages that
will fit in shared memory given the types of operands and the thread block shape, rather than simply using
a single default value.
CUTLASS 3.x provides builders for both collective mainloops and epilogues. The particular implementation of
the collective is specified via the schedule tags that correspond to the underlying collective's
dispatch policy. `gemm::collective::KernelScheduleAuto` and `epilogue::collective::EpilogueScheduleAuto`
are special cases of these schedules that allow the builder to also decide the dispatch policy for you,
therefore letting the builder pick the collective specialization.
CUTLASS builders make an attempt to pick the best schedule when `Auto` is provided such that the
assembled collectives have the best performance, but this is not a guarantee. A user relying on `Auto`
may get a free performance upgrade with newer CUTLASS releases in case we can provide more optimized
implementations that the builder can transparently assemble for `Auto`. But a user should not rely on
`Auto` if they require a specific scheduling policy and/or stage count to be used.
If a user decides to let the builders pick the collective specialization via `Auto` schedules,
they must be used for both mainloop and epilogue alike to ensure compatibility between the
chosen collectives. Additionally, if a user chooses to opt in to a specific schedule, non-`Auto`
schedules must be used for both mainloop and epilogue builder schedules, and these schedules
must be compatible.
One does not need to use the CollectiveBuilder to declare CUTLASS 3 kernels; one can still provide
every template parameter to the `gemm::collective::CollectiveMma`. Specifying every template parameter
in this manner remains the primary API for using CUTLASS 3 kernels. `CollectiveBuilder`s are
simply meant to be a convenience interface.
Details of this example
-----------------------
This example walks through the use of the CollectiveBuilder with various schedules and stage counts specified.
This example also illustrates how CUTLASS 3 GEMMs targeting Hopper automatically support batched GEMMs by simply
extending the problem size with an additional tensor rank.
CUTLASS 3.2 provides initial support for epilogue visitor trees (EVT) for the TMA warp-specialized collective.
EVTs allow users to define their own customized epilogue fusion patterns without having to write a new
collective epilogue. This is done by representing the fusion as a compute graph, where each node is one of a
fundamental set of load, store, or compute operations. These operations are either elementwise for tensor
inputs/outputs, broadcasts for vector/scalar inputs, or reductions for vector/scalar outputs.
This example shows how users can define their own custom EVT and use it with the CollectiveBuilder.
Example usage:
$ ./examples/49_hopper_with_collective_builder/49_collective_builder \
--m=2048 --n=2048 --k=2048 --l=2
*/
#include <iostream>
#include "cute/tensor.hpp"
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
using namespace cute;
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Command line options parsing
struct Options {

  bool help = false;   // print usage and exit
  bool error = false;  // set when option handling fails

  // GEMM extents M, N, K and batch count L.
  int m = 2048;
  int n = 2048;
  int k = 2048;
  int l = 1;

  // Epilogue scalars.
  float alpha = 1.f;
  float beta = 0.f;

  Options() = default;

  // Parses the command line
  void parse(int argc, char const **args) {
    cutlass::CommandLine cmd(argc, args);

    if (cmd.check_cmd_line_flag("help")) {
      help = true;
      return;
    }

    cmd.get_cmd_line_argument("m", m, 2048);
    cmd.get_cmd_line_argument("n", n, 2048);
    cmd.get_cmd_line_argument("k", k, 2048);
    cmd.get_cmd_line_argument("l", l, 1);
    cmd.get_cmd_line_argument("alpha", alpha, 1.f);
    cmd.get_cmd_line_argument("beta", beta, 0.f);
  }

  /// Prints the usage statement.
  std::ostream & print_usage(std::ostream &out) const {
    out << "49_hopper_with_collective_builder\n\n";
    out << " This example showcases the use of CUTLASS's collective operation builders to easily construct\n";
    out << " performant kernels targeting NVIDIA's Hopper architecture.\n\n";
    out << "Options:\n\n";
    out << " --help If specified, displays this usage statement\n\n";
    out << " --m=<int> Sets the M extent of the GEMM\n";
    out << " --n=<int> Sets the N extent of the GEMM\n";
    out << " --k=<int> Sets the K extent of the GEMM\n";
    out << " --l=<int> Sets the L extent (batch count) of the GEMM\n";
    out << " --alpha=<f32> Epilogue scalar alpha\n";
    out << " --beta=<f32> Epilogue scalar beta\n\n";
    return out;
  }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to initialize a block of device data
/// Fills a device allocation with uniform random values, choosing a value
/// range appropriate for the element's bit width.
template <class Element>
bool initialize_block(
  cutlass::DeviceAllocation<Element>& block,
  uint64_t seed=2023) {

  // Narrower element types get a tighter range.
  const int element_bits = cutlass::sizeof_bits<Element>::value;

  Element scope_min, scope_max;
  if (element_bits == 1) {
    scope_min = 0;
    scope_max = 2;
  }
  else if (element_bits <= 8) {
    scope_min = -2;
    scope_max = 2;
  }
  else {
    scope_min = -8;
    scope_max = 8;
  }

  cutlass::reference::device::BlockFillRandomUniform(
    block.get(), block.size(), seed, scope_max, scope_min, 0);

  return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
// Wrapper to construct, run, and verify a GEMM. This example showcases CUTLASS's collective
// operation builders by specializing the GEMM only on the kernel schedule it will use and the
// number of pipeline stages.
//
// One can use a special `Auto` type that tells the CollectiveBuilder
// to select an appropriate value on its own. The CollectiveBuilder will attempt to select
// configurations that will result in the most-performant kernel, but this is not a guarantee.
//
// If relying on 'Auto' schedules, all builders must use the 'Auto' schedule to ensure compatibility.
// For example, if `KernelScheduleAuto` is used for the mainloop builder, `EpilogueScheduleAuto` must
// be used for the epilogue builder.
//
// Furthermore, if an override schedule is selected, both epilogue and mainloop schedules must
// be specifically opt into a compatible selection.
//
// Behavior of the CollectiveBuilder with `Auto` types is subject to change in future releases
// -- do not rely on `Auto` if you require a specific scheduling policy.
template <
  // Type of kernel schedule to generate
  class MainloopScheduleType = cutlass::gemm::collective::KernelScheduleAuto,
  // Type of epilogue schedule to generate
  class EpilogueScheduleType = cutlass::epilogue::collective::EpilogueScheduleAuto,
  // Number of pipeline stages to use
  class StageCountType = cutlass::gemm::collective::StageCountAuto,
  // Type of tile scheduler to use
  class TileSchedulerType = cutlass::gemm::PersistentScheduler,
  // Do we use custom epilogue visitor tree (EVT) fusion
  bool UseCustomEVT = false
>
struct ExampleRunner {

  // Operand layouts are fixed for this example: A row-major, B/C/D column-major.
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::ColumnMajor;
  using LayoutD = cutlass::layout::ColumnMajor;

  // FP16 operands with FP32 accumulation, compute, and epilogue scalars.
  using ElementA = cutlass::half_t;
  using ElementB = cutlass::half_t;
  using ElementC = cutlass::half_t;
  using ElementD = cutlass::half_t;
  using ElementAccumulator = float;
  using ElementCompute = float;
  using ElementScalar = float;

  // 16B alignment lets us use TMA
  static constexpr int AlignmentA = 16 / sizeof(ElementA);
  static constexpr int AlignmentB = 16 / sizeof(ElementB);
  static constexpr int AlignmentC = 16 / sizeof(ElementC);
  static constexpr int AlignmentD = 16 / sizeof(ElementD);

  static_assert(not UseCustomEVT ||
    (cute::is_same_v<EpilogueScheduleType, cutlass::epilogue::TmaWarpSpecialized> ||
     cute::is_same_v<EpilogueScheduleType, cutlass::epilogue::TmaWarpSpecializedCooperative>),
    "Epilogue visitor trees are currently only supported by the TMA warp-specialized epilogue");
  static constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;

  // EVTs can be constructed by composing the fundamental load/store/compute visitor operations defined in include/cutlass/epilogue/fusion
  // For more complex examples of EVT construction please refer to include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp
  using CustomEVT =  // alpha * acc + beta * C
    cutlass::epilogue::fusion::Sm90EVT<cutlass::epilogue::fusion::Sm90Compute<cutlass::homogeneous_multiply_add, ElementD, ElementCompute, RoundStyle>, // beta * C + (alpha * acc)
      cutlass::epilogue::fusion::Sm90ScalarBroadcast<ElementScalar>, // beta
      cutlass::epilogue::fusion::Sm90SrcFetch<ElementC>, // C
      cutlass::epilogue::fusion::Sm90EVT<cutlass::epilogue::fusion::Sm90Compute<cutlass::multiplies, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc
        cutlass::epilogue::fusion::Sm90ScalarBroadcast<ElementScalar>, // alpha
        cutlass::epilogue::fusion::Sm90AccFetch // acc
      >
    >;

  // A predefined set of fusion operations (implemented with EVT) are supported by the TMA warp-specialized epilogue.
  // Users can select one of these operations by passing one of the tags defined in include/cutlass/epilogue/fusion/operations.hpp
  // to the CollectiveBuilder. This frees the user from having to compute additional parameters such as stage counts and copy atoms/layouts.
  // These tags also provide additional metadata that can be queried at compile time.
  using DefaultOperation = cutlass::epilogue::fusion::LinearCombination<ElementD, ElementCompute, ElementC, ElementScalar, RoundStyle>;

  using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
      cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
      Shape<_128,_128,_64>, Shape<_1,_1,_1>,
      cutlass::epilogue::collective::EpilogueTileAuto,
      ElementAccumulator, ElementCompute,
      ElementC, LayoutC, AlignmentC,
      ElementD, LayoutD, AlignmentD,
      EpilogueScheduleType,
      cute::conditional_t<UseCustomEVT, CustomEVT, DefaultOperation>
    >::CollectiveOp;

  using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
      cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
      ElementA, LayoutA, AlignmentA,
      ElementB, LayoutB, AlignmentB,
      ElementAccumulator,
      Shape<_128,_128,_64>, Shape<_2,_1,_1>,
      // When stages are Auto, carve the epilogue's shared-memory footprint
      // out of the budget before computing the maximum mainloop stage count.
      cute::conditional_t<cute::is_same_v<StageCountType, cutlass::gemm::collective::StageCountAuto>,
        cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
        StageCountType>,
      MainloopScheduleType
    >::CollectiveOp;

  using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
      Shape<int,int,int,int>, // rank-4 problem shape (M, N, K, L) enables batching
      CollectiveMainloop,
      CollectiveEpilogue,
      TileSchedulerType
  >;

  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;

  using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;

  using StrideA = typename Gemm::GemmKernel::StrideA;
  using StrideB = typename Gemm::GemmKernel::StrideB;
  using StrideC = typename Gemm::GemmKernel::StrideC;
  using StrideD = typename Gemm::GemmKernel::StrideD;

  using LayoutTagA = cutlass::gemm::detail::StrideToLayoutTagA_t<StrideA>;
  using LayoutTagB = cutlass::gemm::detail::StrideToLayoutTagB_t<StrideB>;
  using LayoutTagC = cutlass::gemm::detail::StrideToLayoutTagC_t<StrideC>;
  using LayoutTagD = cutlass::gemm::detail::StrideToLayoutTagC_t<StrideD>;

  //
  // Data members
  //

  /// Initialization
  StrideA stride_A;
  StrideB stride_B;
  StrideC stride_C;
  StrideD stride_D;
  uint64_t seed = 0;

  // Device-resident operand and result buffers.
  cutlass::DeviceAllocation<typename Gemm::ElementA> block_A;
  cutlass::DeviceAllocation<typename Gemm::ElementB> block_B;
  cutlass::DeviceAllocation<typename Gemm::ElementC> block_C;
  cutlass::DeviceAllocation<typename Gemm::ElementD> block_D;
  cutlass::DeviceAllocation<typename Gemm::ElementD> block_ref_D; // reference output

  //
  // Methods
  //

  /// Computes a reference result on device with GemmComplex and compares it
  /// element-for-element against the kernel output in block_D.
  /// Returns true when the two results are identical.
  bool verify(const ProblemShapeType& problem_size, float alpha, float beta) {
    auto [M, N, K, L] = problem_size;

    cutlass::TensorRef ref_A(block_A.get(), Gemm::LayoutA::packed({M, K}));
    cutlass::TensorRef ref_B(block_B.get(), Gemm::LayoutB::packed({K, N}));
    cutlass::TensorRef ref_C(block_C.get(), Gemm::LayoutC::packed({M, N}));
    cutlass::TensorRef ref_D(block_ref_D.get(), Gemm::LayoutD::packed({M, N}));

    cutlass::reference::device::GemmComplex(
          {M, N, K},
          ElementScalar(alpha),
          ref_A,
          cutlass::ComplexTransform::kNone,
          ref_B,
          cutlass::ComplexTransform::kNone,
          ElementScalar(beta),
          ref_C,
          ref_D,
          ElementAccumulator(0),
          L,     // batch_count
          M * K, // batch_stride_A
          K * N, // batch_stride_B
          M * N, // batch_stride_C
          M * N  // batch_stride_D
        );

    cudaError_t result = cudaDeviceSynchronize();
    if (result != cudaSuccess) {
      std::cerr << "Reference kernel failed. Last CUDA error: "
                << cudaGetErrorString(result) << std::endl;
      return false;
    }

    // Check if output from CUTLASS kernel and reference kernel are equal or not
    bool passed = cutlass::reference::device::BlockCompareEqual(block_ref_D.get(), block_D.get(), block_D.size());

    return passed;
  }

  /// Initialize operands to be used in the GEMM and reference GEMM.
  /// Builds packed strides, allocates device buffers sized for the problem,
  /// and fills A/B/C with pseudo-random data (fixed seeds for reproducibility).
  void initialize(const ProblemShapeType& problem_size) {
    auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
    auto [M, N, K, L] = problem_shape_MNKL;

    stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L));
    stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L));
    stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L));
    stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L));

    block_A.reset(M * K * L);
    block_B.reset(K * N * L);
    block_C.reset(M * N * L);
    block_D.reset(M * N * L);
    block_ref_D.reset(M * N * L);

    initialize_block(block_A, seed + 2023);
    initialize_block(block_B, seed + 2022);
    initialize_block(block_C, seed + 2021);
  }

  /// Builds kernel arguments from the parsed options, launches the GEMM, and
  /// verifies the output against the device reference. Returns true on success.
  bool run(const Options& options, const cutlass::KernelHardwareInfo& hw_info) {
    ProblemShapeType problem_size = ProblemShapeType{options.m, options.n, options.k, options.l};

    initialize(problem_size);

    typename Gemm::Arguments arguments{
      cutlass::gemm::GemmUniversalMode::kGemm,
      problem_size,
      {block_A.get(), stride_A, block_B.get(), stride_B},
      {{}, // epilogue.thread
       block_C.get(), stride_C, block_D.get(), stride_D},
      hw_info
    };

    // Custom EVT fusions will have nested unnamed args, the structure of which
    // can be deduced from the type definition of the EVT.
    // Each node's arguments has the recursive structure of
    // {first_child_args, ..., last_child_args, op_args},
    // For more complex examples of EVT initialization please refer to
    // include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp
    if constexpr (UseCustomEVT) {
      arguments.epilogue.thread =
        {    // ternary op : beta * C + (alpha * acc)
          {{options.beta}}, // leaf op+args : beta
          {},               // leaf op+args : C
          {                 // binary op : alpha * acc
            {{options.alpha}}, // leaf op+args : alpha
            {},                // leaf op+args : acc
            {}                 // binary args : multiplies
          },                // end binary op
          {}                // ternary args : multiply_add
        };   // end ternary op
    }
    // Pre-defined fusions will have flat, named args for user-friendliness
    else {
      arguments.epilogue.thread.alpha = options.alpha;
      arguments.epilogue.thread.beta = options.beta;
    }

    Gemm gemm_op;

    size_t workspace_size = Gemm::get_workspace_size(arguments);
    cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

    cutlass::Status status = gemm_op.can_implement(arguments);
    if (status != cutlass::Status::kSuccess) {
      std::cerr << "This kernel is not supported. Last CUDA error is: "
                << cudaGetErrorString(cudaGetLastError()) << std::endl;
      return false;
    }

    status = gemm_op.initialize(arguments, workspace.get());
    if (status != cutlass::Status::kSuccess) {
      std::cerr << "Failed to initialize the CUTLASS kernel. Last CUDA error is: "
                << cudaGetErrorString(cudaGetLastError()) << std::endl;
      return false;
    }

    // Run the GEMM
    status = gemm_op.run();
    if (status != cutlass::Status::kSuccess) {
      std::cerr << "Failed to launch the CUTLASS kernel. Last CUDA error is: "
                << cudaGetErrorString(cudaGetLastError()) << std::endl;
      return false;
    }

    cudaError_t result = cudaDeviceSynchronize();
    if (result != cudaSuccess) {
      std::cerr << "Error running the CUTLASS kernel. Last CUDA error is: "
                << cudaGetErrorString(result) << std::endl;
      return false;
    }

    // Verify that the result is correct
    bool passed = verify(problem_size, options.alpha, options.beta);
    if (!passed) {
      std::cerr << "Reference check failed" << std::endl;
    }

    return passed;
  }

};
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to print a description of the example run and its result
/// Helper to print a one-line pass/fail summary for an example run.
void print_result(const std::string& description, bool passed) {
  const char* verdict = passed ? "Passed" : "Failed";
  std::cout << description << ": " << verdict << std::endl;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Program entry point.
///
/// Verifies toolkit/device requirements (CUDA 12+, compute capability 90+),
/// parses command-line options, then runs a series of GEMM examples that
/// differ only in their mainloop schedule, epilogue schedule, stage count,
/// tile scheduler, and epilogue fusion. Returns 0 on success (including a
/// graceful skip on unsupported platforms) and -1 on setup errors.
int main(int argc, char const **args) {

  cudaDeviceProp props;

  cudaError_t error = cudaGetDeviceProperties(&props, 0);
  if (error != cudaSuccess) {
    std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
    return -1;
  }

  // Exit with success (not failure) on pre-Hopper GPUs or pre-12.0 toolkits so
  // automated runs do not report errors on platforms this example cannot target.
  if (__CUDACC_VER_MAJOR__ < 12 || props.major < 9) {
    std::cout
      << "This example requires a GPU of NVIDIA's Hopper Architecture or "
      << "later (compute capability 90 or greater) and CUDA 12.0 or greater.\n";
    return 0;
  }

  //
  // Parse options
  //

  Options options;

  options.parse(argc, args);

  if (options.help) {
    options.print_usage(std::cout) << std::endl;
    return 0;
  }

  if (options.error) {
    std::cerr << "Aborting execution." << std::endl;
    return -1;
  }

#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)

  //
  // Run examples
  //

  // The KernelHardwareInfo struct holds the number of SMs on the GPU with a given device ID. This
  // information is used by the underlying kernel.
  cutlass::KernelHardwareInfo hw_info;

  // Change device_id to another value if you are running on a machine with multiple GPUs and wish
  // to use a GPU other than that with device ID 0.
  hw_info.device_id = 0;
  hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);

  bool passed;

  // This first example constructs a GEMM using the default schedule and stage count provided by
  // the CollectiveBuilder. The scheduling policy that is expected to be most performant will be
  // selected and the maximum number of stages that can fit in shared memory will be selected.
  //
  // This example is equivalent to declaring
  //   ExampleRunner<
  //     cutlass::gemm::collective::KernelScheduleAuto,
  //     cutlass::epilogue::collective::EpilogueScheduleAuto,
  //     cutlass::gemm::collective::StageCountAuto>
  // Each of the `Auto` types indicate that the CollectiveBuilder should determine the scheduling policy and
  // stage count. Note that the behavior of the CollectiveBuilder with `Auto` parameters is subject to change
  // -- do not rely on `Auto` if you require a specific scheduling policy.
  // If you opt in to a non-'Auto' schedule, make sure all collectives are built using specific, compatible schedules.
  ExampleRunner<> auto_schedule_auto_stage_runner;
  passed = auto_schedule_auto_stage_runner.run(options, hw_info);
  print_result("Automatically-selected schedule and stage count", passed);

  // One can override the stage count used in the GEMM by replacing cutlass::gemm::collective::StageCountAuto
  // with the number of stages to use (5 in this case).
  ExampleRunner<
    cutlass::gemm::collective::KernelScheduleAuto,
    cutlass::epilogue::collective::EpilogueScheduleAuto,
    _5> auto_schedule_5_stage_runner;
  passed = auto_schedule_5_stage_runner.run(options, hw_info);
  print_result("Automatically-selected schedule with 5 stages", passed);

  // One can also override the scheduling policy to use. In this case, use the KernelTma scheduling
  // policy, which specifies that the Hopper TMA feature should be used, and we also use an epilogue
  // that does not use any shared memory.
  ExampleRunner<cutlass::gemm::KernelTma, cutlass::epilogue::NoSmemWarpSpecialized> tma_schedule_auto_stage_runner;
  passed = tma_schedule_auto_stage_runner.run(options, hw_info);
  print_result("TMA schedule with automatically-selected stage count", passed);

  // Here, we override the scheduling policy to use Hopper's TMA feature alongside the warp-specialized
  // scheduling policy, and an epilogue that does not use any shared memory.
  ExampleRunner<cutlass::gemm::KernelTmaWarpSpecialized, cutlass::epilogue::NoSmemWarpSpecialized> ws_schedule_auto_stage_runner;
  passed = ws_schedule_auto_stage_runner.run(options, hw_info);
  print_result("Warp-specialized TMA schedule with automatically-selected stage count", passed);

  // Here, we override the scheduling policy to use Hopper's TMA feature, alongside the warp-specialized
  // scheduling policy, TMA-based epilogue, leveraging persistent thread blocks.
  ExampleRunner<
    cutlass::gemm::KernelTmaWarpSpecializedPingpong,
    cutlass::epilogue::TmaWarpSpecialized> ws_pingpong_schedule_auto_stage_runner;
  passed = ws_pingpong_schedule_auto_stage_runner.run(options, hw_info);
  print_result("Ping-pong warp-specialized TMA schedule with automatically-selected stage count", passed);

  // Here, we override the scheduling policy to use stream-K problem decomposition atop the cooperative
  // warp-specialized scheduling policy. This kernel continues to leverage persistent thread blocks
  // as well as TMA in both the mainloop and epilogue.
  ExampleRunner<
    cutlass::gemm::KernelTmaWarpSpecializedCooperative,
    cutlass::epilogue::TmaWarpSpecializedCooperative,
    cutlass::gemm::collective::StageCountAuto,
    cutlass::gemm::StreamKScheduler> ws_cooperative_stream_k_schedule_auto_stage_runner;
  passed = ws_cooperative_stream_k_schedule_auto_stage_runner.run(options, hw_info);
  print_result("Cooperative warp-specialized TMA schedule using stream-K with automatically-selected stage count", passed);

  // Here, we override the fusion operation to use a customized EVT fusion, in addition to the previous schedule overrides
  ExampleRunner<
    cutlass::gemm::KernelTmaWarpSpecializedCooperative,
    cutlass::epilogue::TmaWarpSpecializedCooperative,
    cutlass::gemm::collective::StageCountAuto,
    cutlass::gemm::PersistentScheduler,
    true> ws_cooperative_schedule_auto_stage_custom_evt_runner;
  passed = ws_cooperative_schedule_auto_stage_custom_evt_runner.run(options, hw_info);
  print_result("Cooperative warp-specialized TMA schedule using custom epilogue visitor tree with automatically-selected stage count", passed);
#endif

  return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu/0 | {
"file_path": "cutlass/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu",
"repo_id": "cutlass",
"token_count": 9926
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Simple Hopper FP8 GEMM example using CUTLASS 3.0 APIs for NVIDIA Hopper architecture
    This example demonstrates a simple way to instantiate and run an FP8 GEMM using the new CUTLASS 3.0
APIs on NVIDIA Hopper architecture. New features that will be showcased in this example are as follows:
1. NVIDIA Hopper architecture introduces a new series of tensor core instructions (GMMA)
which are more efficient than the Ampere tensor core instructions.
2. NVIDIA Hopper architecture includes new Tensor Memory Accelerator (TMA) unit to transfer large
blocks of data efficiently between global memory and shared memory. TMA also supports asynchronous
copies between thread blocks in a cluster.
3. This example uses the Warp Specialized kernel design (see /media/docs/efficient_gemm.md for details).
4. This example shows all important fusions used by FP8 gemm kernels,
i.e., scale factor for A, B, C, D tensor, the abs_max value of D tensor.
Examples:
$ ./examples/54_hopper_fp8_warp_specialized_gemm/54_hopper_fp8_warp_specialized_gemm --m=2048 --n=2048 --k=2048
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cute/tensor.hpp"
#include "cutlass/tensor_ref.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gett.hpp"
#include "helper.h"
#include "hopper_fp8_commandline.hpp"
using namespace cute;
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM kernel configurations
/////////////////////////////////////////////////////////////////////////////////////////////////
// A matrix configuration
using ElementA = cutlass::float_e4m3_t; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = cutlass::float_e5m2_t; // Element type for B matrix operand
using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C matrix configuration
using ElementC = cutlass::float_e4m3_t; // Element type for C and D matrix operands
using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrix in units of elements (up to 16 bytes)
// D matrix configuration
using ElementD = ElementC;
using LayoutD = LayoutC;
constexpr int AlignmentD = AlignmentC;
// Auxiliary matrix configuration and other fusion types
using ElementAux = ElementC;
using LayoutAux = LayoutC;
using ElementAmax = float;
using ElementBias = float;
// Core kernel configurations
using ElementAccumulator = float; // Element type for internal accumulation
using ElementCompute = float; // Element type for epilogue computation
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using TileShape = Shape<_64,_128,_128>; // Threadblock-level tile size
using ClusterShape = Shape<_1,_2,_1>; // Shape of the threadblocks in a cluster
using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecialized;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutAux, cutlass::epilogue::thread::ReLU, ElementD, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementC>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
ArchTag, OperatorClass,
TileShape, ClusterShape,
EpilogueTileType,
ElementAccumulator, ElementCompute,
ElementC, LayoutC, AlignmentC,
ElementD, LayoutD, AlignmentD,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA, AlignmentA,
ElementB, LayoutB, AlignmentB,
ElementAccumulator,
TileShape, ClusterShape,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))
>,
KernelSchedule
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>, // Indicates ProblemShape
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Extract information from Gemm kernel.
using EpilogueOutputOp = typename Gemm::EpilogueOutputOp;
using ElementScalar = typename EpilogueOutputOp::ElementScalar;
using ElementAmax = typename EpilogueOutputOp::ElementAmax;
using ActivationFunctor = typename EpilogueOutputOp::ActivationFn;
using StrideA = typename Gemm::GemmKernel::StrideA;
using StrideB = typename Gemm::GemmKernel::StrideB;
using StrideC = typename Gemm::GemmKernel::StrideC;
using StrideD = typename Gemm::GemmKernel::StrideD;
using StrideAux = StrideD;
// D and the auxiliary tensor may be FP8; when they are, the fusion can compute
// their absolute-maximum values (see the save_amax handling below).
constexpr bool IsDFp8 =
    cute::is_same_v<ElementD, cutlass::float_e4m3_t> or
    cute::is_same_v<ElementD, cutlass::float_e5m2_t>;

constexpr bool IsAuxFp8 =
    cute::is_same_v<ElementAux, cutlass::float_e4m3_t> or
    cute::is_same_v<ElementAux, cutlass::float_e5m2_t>;

/// Initialization
// Problem strides and RNG seed, populated by initialize().
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
StrideAux stride_aux;
uint64_t seed;

// Host/device operand tensors; the *_ref_* tensors hold host reference results.
cutlass::HostTensor<ElementA  , LayoutA  > tensor_A;
cutlass::HostTensor<ElementB  , LayoutB  > tensor_B;
cutlass::HostTensor<ElementC  , LayoutC  > tensor_C;
cutlass::HostTensor<ElementD  , LayoutD  > tensor_D;
cutlass::HostTensor<ElementD  , LayoutD  > tensor_ref_D;
cutlass::HostTensor<ElementAux, LayoutAux> tensor_aux;
cutlass::HostTensor<ElementAux, LayoutAux> tensor_ref_aux;

// One-element buffers for scalar fusion arguments (alpha/beta and per-tensor
// scale factors) and for the epilogue-computed absolute maxima, together with
// their host-computed reference counterparts.
using LayoutScalar = cutlass::layout::PackedVectorLayout;
cutlass::HostTensor<ElementScalar, LayoutScalar> scalar_alpha;
cutlass::HostTensor<ElementScalar, LayoutScalar> scalar_beta;
cutlass::HostTensor<ElementScalar, LayoutScalar> scale_A;
cutlass::HostTensor<ElementScalar, LayoutScalar> scale_B;
cutlass::HostTensor<ElementScalar, LayoutScalar> scale_C;
cutlass::HostTensor<ElementScalar, LayoutScalar> scale_D;
cutlass::HostTensor<ElementScalar, LayoutScalar> scale_aux;
cutlass::HostTensor<ElementAmax  , LayoutScalar> abs_max_D;
cutlass::HostTensor<ElementAmax  , LayoutScalar> reference_abs_max_D;
cutlass::HostTensor<ElementAmax  , LayoutScalar> abs_max_aux;
cutlass::HostTensor<ElementAmax  , LayoutScalar> reference_abs_max_aux;
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Testbed utility types
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result
{
double avg_runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
Result(
double avg_runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess)
:
avg_runtime_ms(avg_runtime_ms), gflops(gflops), status(status), error(error), passed(false)
{}
};
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM setup and evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to initialize a block of device data
/// Fills a host tensor view with uniformly distributed random values whose
/// range is chosen from the element type's bit width, so narrow types
/// (e.g. FP8/int8) are not saturated during accumulation.
///
/// \param view  host-side tensor view filled in place
/// \param seed  RNG seed, making fills reproducible across runs
/// \return true (kept for call-site compatibility; the fill cannot fail)
template <typename Element, typename Layout>
bool initialize_tensor(
  cutlass::TensorView<Element, Layout> view,
  uint64_t seed) {

  double scope_max, scope_min;
  // A single bit width suffices: the previous `bits_output` variable was a
  // redundant copy of this same value (both described Element).
  int bits = cutlass::sizeof_bits<Element>::value;

  if (bits == 1) {
    // Binary elements: restrict to {0, 1}.
    scope_max = 2;
    scope_min = 0;
  }
  else if (bits <= 8) {
    // Narrow types (FP8/int8): keep magnitudes small.
    scope_max = 2;
    scope_min = -2;
  }
  else if (bits == 16) {
    scope_max = 5;
    scope_min = -5;
  }
  else {
    scope_max = 8;
    scope_min = -8;
  }

  cutlass::reference::host::TensorFillRandomUniform(
    view, seed, scope_max, scope_min, 0);

  return true;
}
/// Initialize operands to be used in the GEMM and reference GEMM
/// Allocates and initializes every operand used by the CUTLASS GEMM and the
/// host reference GEMM.
///
/// Builds packed strides for A/B/C/D from the problem shape, sizes the host
/// tensors (the batch extent L is folded into one mode of the 2-D host
/// coordinates), fills A/B/C with reproducible random data, and copies them to
/// the device. Optional auxiliary-output, device-side-scale, and abs-max
/// buffers are allocated only when the corresponding command-line options
/// request them.
void initialize(const Options &options) {

  stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(options.m, options.k, options.l));
  stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(options.n, options.k, options.l));
  stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(options.m, options.n, options.l));
  stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(options.m, options.n, options.l));
  // The auxiliary tensor shares D's shape and stride.
  stride_aux = stride_D;

  // Host tensors are 2-D, so the batch extent L is folded into one mode.
  auto a_coord = cutlass::make_Coord(options.m * options.l, options.k);
  auto c_coord = cutlass::make_Coord(options.m * options.l, options.n);
  auto b_coord = cutlass::make_Coord(options.k, options.n * options.l);

  tensor_A.resize(a_coord);
  tensor_B.resize(b_coord);
  tensor_C.resize(c_coord);
  tensor_D.resize(c_coord);
  tensor_ref_D.resize(c_coord);

  // Distinct seed offsets keep A/B/C decorrelated while remaining reproducible.
  initialize_tensor(tensor_A.host_view(), seed + 2022);
  initialize_tensor(tensor_B.host_view(), seed + 2023);
  initialize_tensor(tensor_C.host_view(), seed + 2024);

  tensor_A.sync_device();
  tensor_B.sync_device();
  tensor_C.sync_device();
  tensor_D.sync_device();

  if (options.save_aux) {
    tensor_aux.resize(c_coord);
    tensor_aux.sync_device();
    // The reference aux output stays host-only; verify() compares against it.
    tensor_ref_aux.resize(c_coord);
  }

  if (options.device_scale) {
    // Allocate one-element buffers and replicate the host-side scalars into
    // them so the kernel can read alpha/beta and scales from device memory.
    scalar_alpha.resize(cutlass::make_Coord(1));
    scalar_beta.resize(cutlass::make_Coord(1));
    scale_A.resize(cutlass::make_Coord(1));
    scale_B.resize(cutlass::make_Coord(1));
    scale_C.resize(cutlass::make_Coord(1));
    scale_D.resize(cutlass::make_Coord(1));
    scale_aux.resize(cutlass::make_Coord(1));
    cutlass::reference::host::TensorFill(scalar_alpha.host_view(), options.alpha);
    cutlass::reference::host::TensorFill(scalar_beta.host_view(), options.beta);
    cutlass::reference::host::TensorFill(scale_A.host_view(), options.scale_a);
    cutlass::reference::host::TensorFill(scale_B.host_view(), options.scale_b);
    cutlass::reference::host::TensorFill(scale_C.host_view(), options.scale_c);
    cutlass::reference::host::TensorFill(scale_D.host_view(), options.scale_d);
    cutlass::reference::host::TensorFill(scale_aux.host_view(), options.scale_aux);
    scalar_alpha.sync_device();
    scalar_beta.sync_device();
    scale_A.sync_device();
    scale_B.sync_device();
    scale_C.sync_device();
    scale_D.sync_device();
    scale_aux.sync_device();
  }

  // Abs-max outputs are only meaningful when the corresponding tensor is FP8
  // and the user asked for amax computation.
  if (IsDFp8 && options.save_amax) {
    abs_max_D.resize(cutlass::make_Coord(1));
    abs_max_D.sync_device();
    reference_abs_max_D.resize(cutlass::make_Coord(1));
  }

  if (IsAuxFp8 && options.save_aux && options.save_amax) {
    abs_max_aux.resize(cutlass::make_Coord(1));
    abs_max_aux.sync_device();
    reference_abs_max_aux.resize(cutlass::make_Coord(1));
  }
}
/// Populates a Gemm::Arguments structure from the given commandline options
/// Populates a Gemm::Arguments structure from the given commandline options.
///
/// Fills in the problem shape and the A/B/C/D pointers and strides, then wires
/// up the fusion arguments: host-side scalar values, device-side scalar
/// pointers, and the optional aux/amax outputs. Null pointers disable the
/// corresponding fusion at runtime.
typename Gemm::Arguments args_from_options(const Options &options)
{
  typename Gemm::Arguments arguments{
    cutlass::gemm::GemmUniversalMode::kGemm,
    {options.m, options.n, options.k, options.l},
    {tensor_A.device_data(), stride_A, tensor_B.device_data(), stride_B},
    {
      {}, // epilogue.thread
      tensor_C.device_data(), stride_C,
      tensor_D.device_data(), stride_D
    }
  };

  auto &fusion_args = arguments.epilogue.thread;
  fusion_args.alpha = options.alpha;
  fusion_args.beta = options.beta;
  // Device-side scalar pointers. NOTE(review): when --device_scale is off
  // these buffers were never resized — presumably device_data() is then null
  // and the host-side values above are used; confirm against HostTensor.
  fusion_args.alpha_ptr = scalar_alpha.device_data();
  fusion_args.beta_ptr = scalar_beta.device_data();
  fusion_args.scale_a = options.scale_a;
  fusion_args.scale_b = options.scale_b;
  fusion_args.scale_c = options.scale_c;
  fusion_args.scale_a_ptr = scale_A.device_data();
  fusion_args.scale_b_ptr = scale_B.device_data();
  fusion_args.scale_c_ptr = scale_C.device_data();

  // ignored if tensor types are not fp8
  fusion_args.scale_d = options.scale_d;
  fusion_args.scale_aux = options.scale_aux;
  fusion_args.scale_d_ptr = scale_D.device_data();
  fusion_args.scale_aux_ptr = scale_aux.device_data();

  // leaving/setting these as nullptr disables the fusion at runtime
  fusion_args.bias_ptr = nullptr;

  if (options.save_aux) {
    fusion_args.aux_ptr = tensor_aux.device_data();
    fusion_args.dAux = stride_aux;
    if (options.save_amax) {
      fusion_args.amax_aux_ptr = abs_max_aux.device_data();
    }
  }

  if (options.save_amax) {
    fusion_args.amax_D_ptr = abs_max_D.device_data();
  }

  return arguments;
}
/// Verifies the CUTLASS GEMM result against a host reference computation.
///
/// Builds CuTe tensor views over the host copies of A/B/C and the reference
/// D/Aux buffers, runs cutlass::reference::host::Gemm3x with the same scalar
/// and scale parameters as the device kernel, then compares D (and, when
/// enabled, the aux output and the FP8 abs-max scalars) for exact equality.
/// \return true iff every enabled output matches the reference
bool verify(const Options &options) {
  //
  // Compute reference output
  //

  // Create instantiation for device reference gemm kernel
  auto A = cute::make_tensor(tensor_A.host_data(),
      cute::make_layout(cute::make_shape(options.m, options.k, options.l), stride_A));
  auto B = cute::make_tensor(tensor_B.host_data(),
      cute::make_layout(cute::make_shape(options.n, options.k, options.l), stride_B));
  auto C = cute::make_tensor(tensor_C.host_data(),
      cute::make_layout(cute::make_shape(options.m, options.n, options.l), stride_C));
  auto D = cute::make_tensor(tensor_ref_D.host_data(),
      cute::make_layout(cute::make_shape(options.m, options.n, options.l), stride_D));
  auto Aux = cute::make_tensor(tensor_ref_aux.host_data(),
      cute::make_layout(cute::make_shape(options.m, options.n, options.l), stride_aux));
  // Placeholder type for epilogue inputs this example does not use
  // (bias vector, per-row alpha/beta vectors).
  using unused_t = decltype(D);

  cutlass::reference::host::GettMainloopParams<ElementAccumulator, decltype(A), decltype(B)> mainloop_params{A, B};

  cutlass::reference::host::GettEpilogueParams<
      ElementScalar,
      ElementScalar,
      ElementAccumulator,
      ElementCompute,
      decltype(C),
      decltype(D),
      unused_t, // bias
      decltype(Aux),
      unused_t, // valpha
      unused_t, // vbeta
      ActivationFunctor
    > epilogue_params;

  // Mirror the device kernel's epilogue configuration exactly.
  epilogue_params.C = C;
  epilogue_params.D = D;
  epilogue_params.Aux = Aux;
  epilogue_params.alpha = options.alpha;
  epilogue_params.beta = options.beta;
  epilogue_params.scale_a = options.scale_a;
  epilogue_params.scale_b = options.scale_b;
  epilogue_params.scale_c = options.scale_c;
  epilogue_params.scale_d = options.scale_d;
  epilogue_params.scale_aux = options.scale_aux;
  epilogue_params.abs_max_D = reference_abs_max_D.host_data();
  epilogue_params.abs_max_Aux = reference_abs_max_aux.host_data();

  // get reference result
  cutlass::reference::host::Gemm3x(mainloop_params, epilogue_params);

  // compare_reference
  tensor_D.sync_host();
  bool passed = cutlass::reference::host::TensorEquals(tensor_ref_D.host_view(), tensor_D.host_view());

  if (IsDFp8 && options.save_amax) {
    abs_max_D.sync_host();
    passed &= abs_max_D.at(cutlass::make_Coord(0)) == reference_abs_max_D.at(cutlass::make_Coord(0));
  }

  if (options.save_aux) {
    tensor_aux.sync_host();
    passed &= cutlass::reference::host::TensorEquals(tensor_ref_aux.host_view(), tensor_aux.host_view());
    if (IsAuxFp8 && options.save_amax) {
      abs_max_aux.sync_host();
      passed &= abs_max_aux.at(cutlass::make_Coord(0)) == reference_abs_max_aux.at(cutlass::make_Coord(0));
    }
  }

  return passed;
}
/// Execute a given example GEMM computation
/// Execute a given example GEMM computation.
///
/// Initializes operands, constructs and launches the CUTLASS kernel once for
/// warmup/verification, checks the result against the host reference (the
/// process exits with -1 on mismatch), and optionally times
/// `options.iterations` repeated launches to report average runtime and
/// GFLOP/s. Returns 0 on success.
template <typename Gemm>
int run(Options &options)
{
  initialize(options);

  // Instantiate CUTLASS kernel depending on templates
  Gemm gemm;

  // Create a structure of gemm kernel arguments suitable for invoking an instance of Gemm
  auto arguments = args_from_options(options);

  // Using the arguments, query for extra workspace required for matrix multiplication computation
  size_t workspace_size = Gemm::get_workspace_size(arguments);

  // Allocate workspace memory
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

  // Check if the problem size is supported or not
  CUTLASS_CHECK(gemm.can_implement(arguments));

  // Initialize CUTLASS kernel with arguments and workspace pointer
  CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));

  // Correctness / Warmup iteration
  CUTLASS_CHECK(gemm.run());

  // Check if output from CUTLASS kernel and reference kernel are equal or not
  Result result;
  result.passed = verify(options);

  std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl;

  if (!result.passed) {
    exit(-1);
  }

  // Run profiling loop
  if (options.iterations > 0)
  {
    GpuTimer timer;
    timer.start();
    for (int iter = 0; iter < options.iterations; ++iter) {
      CUTLASS_CHECK(gemm.run());
    }
    timer.stop();

    // Compute average runtime and GFLOPs.
    float elapsed_ms = timer.elapsed_millis();
    result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations);
    result.gflops = options.gflops(result.avg_runtime_ms / 1000.0);

    std::cout << " Problem Size: " << options.m << 'x' << options.n << 'x' << options.k << 'x' << options.l << std::endl;
    std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl;
    std::cout << " GFLOPS: " << result.gflops << std::endl;
  }

  return 0;
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Program entry point: checks toolkit and device requirements, parses
/// command-line options, and runs the FP8 warp-specialized GEMM example.
/// Returns 0 on success or graceful skip.
int main(int argc, char const **args) {

  // CUTLASS must be compiled with CUDA 12.0 Toolkit to run this example
  // and must have compute capability at least 90.
  if (__CUDACC_VER_MAJOR__ < 12) {
    std::cerr << "This example requires CUDA 12 or newer.\n";
    // Returning zero so this test passes on older Toolkits. Its actions are no-op.
    return 0;
  }

  cudaDeviceProp props;
  int current_device_id;
  CUDA_CHECK(cudaGetDevice(&current_device_id));
  CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));
  // Fix: a redundant second cudaGetDeviceProperties(&props, 0) call was
  // removed here. It ignored its returned error code and clobbered the
  // just-queried properties of the *current* device with those of device 0.

  if (props.major < 9) {
    std::cerr
      << "This example requires a GPU of NVIDIA's Hopper Architecture or "
      << "later (compute capability 90 or greater).\n";
    return 0;
  }

  //
  // Parse options
  //

  Options options;

  options.parse(argc, args);

  if (options.help) {
    options.print_usage(std::cout) << std::endl;
    return 0;
  }

  //
  // Evaluate CUTLASS kernels
  //

#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
  run<Gemm>(options);
#endif

  return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/examples/54_hopper_fp8_warp_specialized_gemm/54_hopper_fp8_warp_specialized_gemm.cu/0 | {
"file_path": "cutlass/examples/54_hopper_fp8_warp_specialized_gemm/54_hopper_fp8_warp_specialized_gemm.cu",
"repo_id": "cutlass",
"token_count": 8006
} | 11 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Example demonstrating CuTe and CUTLASS 3.x based Ampere convolution forward propagation kernel
           capable of operating on both affine and gather/scatter tensors.
    This example demonstrates a few super cool features of CUTLASS and CuTe. It shows off
1. A dense conv 3D fprop kernel written as a single file ...
2. ... that leverages off the shelf CUTLASS collectives to show how custom kernels can use collectives ...
3. ... and uses the exact same templated kernel to also stamp out a gather/scatter 3D fprop conv ...
4. ... while getting near peak performance of the Ampere class tensor core on Ampere and Ada GPUs ...
5. ... by using static cute shapes and strides in case problem shapes are known at compile time.
Full documentation for this example can be found within the README.md file in this directory.
Example executions:
./59_ampere_gather_scatter_conv
./59_ampere_gather_scatter_conv --n=108
./59_ampere_gather_scatter_conv --n=4096 --i=1
./59_ampere_gather_scatter_conv --n=1080 --i=1000
./59_ampere_gather_scatter_conv --n=131072 --i=1000 --no-check
*/
#include <thrust/sequence.h>
#include <thrust/universal_vector.h>
#include "ampere_conv_kernel.h"
#include "gather_tensor.hpp"
#include "cutlass/util/command_line.h"
/// Reports a CUDA runtime error, if any, with its source location.
///
/// \param code  CUDA status to inspect
/// \param file  source file name of the call site (from __FILE__)
/// \param line  source line number of the call site (from __LINE__)
/// \return true when `code` is cudaSuccess; otherwise false, after writing the
///         numeric code and its string description to stderr.
bool check_cuda_result(cudaError_t code, const char* file, int line) {
  bool const ok = (code == cudaSuccess);
  if (!ok) {
    std::cerr << "CUDA error at (" << file << "," << line << ")\n\t"
              << unsigned(code) << " -- " << cudaGetErrorString(code) << "\n";
  }
  return ok;
}
#define CHECK_CUDA(code) (check_cuda_result(code, __FILE__, __LINE__))
using namespace cute;
using example::IndexedGather;
using example::CustomStride;
// Thin __global__ trampoline: materializes the device-side operator and
// invokes it on the filter/activation/output tensors, forwarding the
// dynamically allocated shared memory buffer. Launch bounds come from the
// operator's own occupancy constants.
template<class Operator, class FilterTensor, class ActivationTensor, class OutputTensor>
__global__
__launch_bounds__(Operator::MaxThreadsPerBlock, Operator::MinBlocksPerMultiprocessor)
void kernel_entrypoint(FilterTensor mFlt, ActivationTensor mAct, OutputTensor mOut) {
  // Dynamic shared memory; its size is set by the host launch configuration.
  extern __shared__ char smem_buf[];
  Operator op;
  op(mFlt, mAct, mOut, smem_buf);
}
// Runs the dense (affine-layout) convolution forward-propagation kernel.
//
// Launches the kernel `num_iterations` times, reports the average runtime and
// achieved TFLOP/s, and optionally validates the device result against a host
// reference implementation.
//
// Returns 0 on success, nonzero on invalid input or a failed reference check.
int ampere_dense_conv_fprop(
    int num_images,
    float* activations,
    float* filter,
    float* output,
    float* output_ref,
    int num_iterations = 1,
    bool do_ref_check = true) {
  // Compile-time problem extents baked into the kernel configuration.
  auto D = typename AmpereUnpredicatedFprop::D{};
  auto H = typename AmpereUnpredicatedFprop::H{};
  auto W = typename AmpereUnpredicatedFprop::W{};
  auto Z = typename AmpereUnpredicatedFprop::Z{};
  auto P = typename AmpereUnpredicatedFprop::P{};
  auto Q = typename AmpereUnpredicatedFprop::Q{};
  auto C = typename AmpereUnpredicatedFprop::C{};
  auto K = typename AmpereUnpredicatedFprop::K{};
  auto S = typename AmpereUnpredicatedFprop::S{};
  auto R = typename AmpereUnpredicatedFprop::R{};
  auto T = typename AmpereUnpredicatedFprop::T{};

  int N = num_images; // dynamic

  if (num_images % int(typename AmpereUnpredicatedFprop::Tiler_N{}) != 0) {
    printf("ERROR: Input image count must be evenly divisible by CTA tiler N.\n");
    return 1;
  }

  // Tensor Activation: (n,d,h,w,c)::(?,6,4,4,64):(6144,1536,384,64,1)
  auto activation_layout = make_layout(
      make_shape (make_shape ( N, D, H, W), make_shape ( C, _1{},_1{},_1{})),
      make_stride(make_stride(D*H*W*C, H*W*C, W*C, C), make_stride(_1{}, _0{},_0{},_0{})));

  // im2col-style view over the *same* activation memory, indexed as
  // ((N,Z,P,Q), (C,T,R,S)) for the implicit-GEMM formulation.
  auto xformed_act_layout = make_layout(
      make_shape (make_shape(N, Z, P, Q), make_shape ( C, T, R, S)),
      make_stride(stride<0>(activation_layout), make_stride(_1{}, H*W*C, W*C, C)));

  // Tensor Filter : (k,c,s,r,t)::(128,3,3,3,64):(1728,576,192,64,1)
  auto filter_layout = AmpereUnpredicatedFprop::GmemLayoutFlt{};

  // Tensor Output : (n,z,p,q,k)::(?,4,2,2,128):(2048,1024,512,128,1)
  auto output_layout = make_ordered_layout(
      make_shape( K, make_shape( N, Z, P, Q)),
      make_tuple(_0{}, make_tuple(_4{},_3{},_2{},_1{})));

  // Note: the transformed activation tensor aliases the raw activation buffer;
  // no separate dense activation tensor is needed by the kernel.
  Tensor mXformedAct = make_tensor(make_gmem_ptr(activations), xformed_act_layout);
  Tensor mFilter     = make_tensor(make_gmem_ptr(filter),      filter_layout);
  Tensor mOutput     = make_tensor(make_gmem_ptr(output),      output_layout); // (K, (N,Z,P,Q))
  Tensor mOutputRef  = make_tensor(make_gmem_ptr(output_ref),  output_layout);

  print("xformed act layout ((N,Z,P,Q), (C,T,R,S)) = "); print(xformed_act_layout); print("\n");

  cudaEvent_t start, stop;
  CHECK_CUDA(cudaEventCreate(&start));
  CHECK_CUDA(cudaEventCreate(&stop));

  constexpr size_t smem_size = sizeof(typename AmpereUnpredicatedFprop::SharedStorage);
  Tensor gOutput_mn = zipped_divide(mOutput, typename AmpereUnpredicatedFprop::TilerOut{}); // ((BLK_M, BLK_N), (m', n'))
  dim3 launch_grid {static_cast<uint32_t>(size<1,1>(gOutput_mn)), static_cast<uint32_t>(size<1,0>(gOutput_mn)), 1};

  CHECK_CUDA(cudaFuncSetAttribute(
      kernel_entrypoint<AmpereUnpredicatedFprop, decltype(mFilter), decltype(mXformedAct), decltype(mOutput)>,
      cudaFuncAttributeMaxDynamicSharedMemorySize,
      smem_size));

  CHECK_CUDA(cudaEventRecord(start));
  for (int i = 0; i < num_iterations; ++i) {
    kernel_entrypoint<AmpereUnpredicatedFprop, decltype(mFilter), decltype(mXformedAct), decltype(mOutput)>
        <<<launch_grid, AmpereUnpredicatedFprop::MaxThreadsPerBlock, smem_size>>>(
            mFilter, mXformedAct, mOutput);
  }
  CHECK_CUDA(cudaEventRecord(stop));
  CHECK_CUDA(cudaEventSynchronize(stop));

  float milliseconds = 0;
  // Check this call like every other CUDA runtime call in this function.
  CHECK_CUDA(cudaEventElapsedTime(&milliseconds, start, stop));
  milliseconds /= float(num_iterations);

  // Release the timing events so repeated benchmark runs do not leak them.
  CHECK_CUDA(cudaEventDestroy(start));
  CHECK_CUDA(cudaEventDestroy(stop));

  double tflop_count = (2 * double(size<0>(xformed_act_layout)) * double(size(filter_layout))) / double(1e12);
  double tflops = tflop_count / (double(milliseconds) / double(1e3));
  printf("Conv TFLOP count = %f\n", tflop_count);
  printf("Conv dense perf: %fms | TFLOP/s = %f\n", milliseconds, tflops);

  if (do_ref_check) {
    printf("Running host reference check ...\n");
    return fprop_reference(mFilter, mXformedAct, mOutput, mOutputRef);
  }
  else {
    return 0;
  }
}
int ampere_gather_scatter_conv_fprop(
int num_images,
float* activations,
uint32_t *gather_idx_buf,
float* filter,
float* output,
uint32_t *scatter_idx_buf,
int num_iterations = 1) {
auto D = typename AmpereUnpredicatedFprop::D{};
auto H = typename AmpereUnpredicatedFprop::H{};
auto W = typename AmpereUnpredicatedFprop::W{};
auto Z = typename AmpereUnpredicatedFprop::Z{};
auto P = typename AmpereUnpredicatedFprop::P{};
auto Q = typename AmpereUnpredicatedFprop::Q{};
auto C = typename AmpereUnpredicatedFprop::C{};
auto K = typename AmpereUnpredicatedFprop::K{};
auto S = typename AmpereUnpredicatedFprop::S{};
auto R = typename AmpereUnpredicatedFprop::R{};
auto T = typename AmpereUnpredicatedFprop::T{};
int N = num_images; // dynamic
if (N % int(typename AmpereUnpredicatedFprop::Tiler_N{}) != 0) {
printf("ERROR: Input image count must be evenly divisible by CTA tiler N. Got num_images = %d\n", N);
return 1;
}
// Tensor Filter : (k,c,s,r,t)::(128,3,3,3,64):(1728,576,192,64,1)
auto filter_layout = AmpereUnpredicatedFprop::GmemLayoutFlt{};
// Tensor Output : (n,z,p,q,k)::(?,4,2,2,128):(2048,1024,512,128,1)
auto output_layout = make_ordered_layout(
make_shape( K, make_shape( N, Z, P, Q)),
make_tuple(_0{}, make_tuple(_4{},_3{},_2{},_1{})));
// Input gather layout
// inner_layout(make_coord((nzpq), (csrt))) => (idx_buffer_idx, dense_c_idx)
auto EG = E<0>{}; // Gather basis (1,0) (idx_buffer_idx)
auto EC = E<1>{}; // Contiguous basis (0,1) (dense_offset)
auto xformed_act_logical_inner = make_layout(
make_shape (make_shape ( N, Z, P, Q), make_shape ( C, T, R, S)),
make_stride(make_stride(D*H*W*EG, H*W*EG, W*EG, EG), make_stride(EC, H*W*EG, W*EG, EG)));
// outer_layout(make_coord(idx_buffer_idx, dense_c_idx)) => idx
// IndexedGather obtains idx by applying (gmem_base_ptr + gather_idx_buf[idx_buffer_idx] + dense_offset)
auto xformed_act_gather_outer = make_layout(
make_shape(_1{},_1{}),
make_stride(CustomStride{IndexedGather{gather_idx_buf}, C}, _1{}));
// Compose the inner and outer layouts
// gather_composed(make_coord((nzpq), (csrt))) => idx
auto xformed_act_composed_layout = composition(
xformed_act_gather_outer,
make_arithmetic_tuple(_0{}, _0{}),
xformed_act_logical_inner);
// Output scatter layout
auto out_basis_stride = make_stride(
E<1>{},
make_stride(Z*P*Q*E<0>{}, P*Q*E<0>{}, Q*E<0>{}, _1{}*E<0>{})); // -> (crd0, crd1)
auto out_basis_layout = make_layout(shape(output_layout), out_basis_stride);
auto out_scatter_layout = make_layout(
make_shape(_1{},_1{}),
make_stride(CustomStride{IndexedGather{scatter_idx_buf}, K}, _1{}));
auto out_composed_layout = composition(
out_scatter_layout,
make_arithmetic_tuple(_0{},_0{}),
out_basis_layout);
Tensor mXformedActGather = make_tensor(make_gmem_ptr(activations), xformed_act_composed_layout);
Tensor mFilter = make_tensor(make_gmem_ptr(filter), filter_layout);
Tensor mOutputScatter = make_tensor(make_gmem_ptr(output), out_composed_layout); // (K, (N,Z,P,Q))
Tensor gOutput_mn = zipped_divide(mOutputScatter, typename AmpereUnpredicatedFprop::TilerOut{}); // ((BLK_M, BLK_N), (m', n'))
dim3 lauch_grid {static_cast<uint32_t>(size<1,1>(gOutput_mn)), static_cast<uint32_t>(size<1,0>(gOutput_mn)), 1};
constexpr size_t smem_size = sizeof(typename AmpereUnpredicatedFprop::SharedStorage);
print("xforemed gather layout ((N,Z,P,Q), (C,T,R,S)) = "); print(xformed_act_composed_layout); print("\n");
print("Output scatter layout ( K, (N,Z,P,Q)) = "); print(out_composed_layout); print("\n");
print("Filter layout ( K, (C,T,R,S)) = "); print(filter_layout); print("\n");
CHECK_CUDA(cudaFuncSetAttribute(
kernel_entrypoint<AmpereUnpredicatedFprop, decltype(mFilter), decltype(mXformedActGather), decltype(mOutputScatter)>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size));
cudaEvent_t start, stop;
CHECK_CUDA(cudaEventCreate(&start));
CHECK_CUDA(cudaEventCreate(&stop));
CHECK_CUDA(cudaEventRecord(start));
for (int i = 0; i < num_iterations; ++i) {
kernel_entrypoint<AmpereUnpredicatedFprop, decltype(mFilter), decltype(mXformedActGather), decltype(mOutputScatter)>
<<<lauch_grid, AmpereUnpredicatedFprop::MaxThreadsPerBlock, smem_size>>>(
mFilter, mXformedActGather, mOutputScatter);
}
CHECK_CUDA(cudaEventRecord(stop));
CHECK_CUDA(cudaEventSynchronize(stop));
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
milliseconds /= float(num_iterations);
double tflop_count = (2 * double(size<0>(xformed_act_logical_inner)) * double(size(filter_layout))) / double(1e12);
double tflops = tflop_count / (double(milliseconds) / double(1e3));
printf("Conv TFLOP count = %f\n", tflop_count);
printf("Conv gather/scatter perf: %fms | TFLOP/s = %f\n", milliseconds, tflops);
return 0;
}
// Program entry point: parses the command line, validates the device,
// allocates and initializes the tensors and the gather/scatter index
// buffers, then runs the dense and the gather/scatter fprop kernels.
int
main(int argc, char const** argv) {
  cutlass::CommandLine cmd(argc, argv);
  // Fixed: "propogation" -> "propagation".
  std::cout << "Ampere convolution forward propagation kernel supporting both affine and gather/scatter tensors.\n\n";

  if (cmd.check_cmd_line_flag("help")) {
    // Fixed: the flag the code actually tests is spelled "no-check", and the
    // actual default image count below is 4320 (the help used to say 131072).
    std::cout
      << "Options:\n"
         "\t--n=<int>        Sets the number of images for the input activation tensor (dataset size). Default = 4320.\n"
         "\t--i=<int>        Sets the benchmarking repetitions. Default = 128.\n"
         "\t--no-check       If specified, skips the reference check for dense kernel.\n"
         "\t--help           Displays this help message and exits.\n";
    return 0;
  }

  cudaDeviceProp props;
  cudaError_t error = cudaGetDeviceProperties(&props, 0);
  if (error != cudaSuccess) {
    std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
    return -1;
  }
  if (props.major < 8) {
    std::cerr << "This example requires an Ampere GPU or newer.\n";
    return 0;
  }

  int num_images = 4320;
  cmd.get_cmd_line_argument("n", num_images, 4320);
  int num_iterations = 128;
  cmd.get_cmd_line_argument("i", num_iterations, 128);
  // Accept both spellings for backward compatibility with the old help text.
  bool do_host_ref_check = not (cmd.check_cmd_line_flag("no-check") || cmd.check_cmd_line_flag("nocheck"));

  // Compile-time problem extents baked into the kernel configuration.
  auto D = typename AmpereUnpredicatedFprop::D{};
  auto H = typename AmpereUnpredicatedFprop::H{};
  auto W = typename AmpereUnpredicatedFprop::W{};
  auto Z = typename AmpereUnpredicatedFprop::Z{};
  auto P = typename AmpereUnpredicatedFprop::P{};
  auto Q = typename AmpereUnpredicatedFprop::Q{};
  auto C = typename AmpereUnpredicatedFprop::C{};
  auto K = typename AmpereUnpredicatedFprop::K{};

  auto activation_layout = make_layout(
      make_shape (make_shape (num_images, D, H, W), make_shape ( C, _1{},_1{},_1{})),
      make_stride(make_stride(   D*H*W*C, H*W*C, W*C, C), make_stride(_1{}, _0{},_0{},_0{})));
  auto filter_layout = typename AmpereUnpredicatedFprop::GmemLayoutFlt{};
  auto output_layout = make_ordered_layout(
      make_shape( K, make_shape(num_images, Z, P, Q)),
      make_step (_0{}, make_step (      _4{},_3{},_2{},_1{})));

  print("Filter layout     (        K, (C,T,R,S)) = "); print(filter_layout); print("\n");
  print("Activation layout ((N,D,H,W), (C,1,1,1)) = "); print(activation_layout); print("\n");
  print("Output layout     (        K, (N,Z,P,Q)) = "); print(output_layout); print("\n");

  // allocate tensors
  std::cout << "Allocating tensors ... ";
  thrust::universal_vector<float> activation_data(size_t(cute::size(activation_layout)), float(0));
  thrust::universal_vector<float> filter_data(size_t(cute::size(filter_layout)), float(0));
  thrust::universal_vector<float> output_data(size_t(cute::size(output_layout)), float(0));
  thrust::universal_vector<float> output_data_ref(size_t(cute::size(output_layout)), float(0));
  std::cout << "done.\n";

  // init tensors with uniform random values in [-1, 1]
  std::cout << "Initializing data ... " << std::flush;
  std::random_device rd;
  std::mt19937 gen(rd());
  std::uniform_real_distribution<float> uniform_dist(-1.0, 1.0);
  for (std::size_t i = 0; i < size_t(cute::size(activation_layout)); ++i) {
    activation_data[i] = uniform_dist(gen);
  }
  for (std::size_t i = 0; i < size_t(cute::size(filter_layout)); ++i) {
    filter_data[i] = uniform_dist(gen);
  }
  std::cout << "done.\n";

  // set up index buffers for gather/scatter, fill with indirection indices in reversed order
  std::cout << "Initializing gather/scatter index buffers ... ";
  thrust::universal_vector<uint32_t> gather_idx_buf(size_t(size<0>(activation_layout)));
  thrust::universal_vector<uint32_t> scatter_idx_buf(size_t(size<1>(output_layout)));
  thrust::sequence(gather_idx_buf.rbegin(), gather_idx_buf.rend());
  thrust::sequence(scatter_idx_buf.rbegin(), scatter_idx_buf.rend());
  std::cout << "done.\n";

  // launch dense
  std::cout << "\nRunning dense fprop kernel\n";
  int passed = ampere_dense_conv_fprop(
      num_images,
      activation_data.data().get(),
      filter_data.data().get(),
      output_data.data().get(),
      output_data_ref.data().get(),
      num_iterations,
      do_host_ref_check);

  // launch gather/scatter
  std::cout << "\nRunning gather/scatter fprop kernel\n";
  ampere_gather_scatter_conv_fprop(
      num_images,
      activation_data.data().get(),
      gather_idx_buf.data().get(),
      filter_data.data().get(),
      output_data.data().get(),
      scatter_idx_buf.data().get(),
      num_iterations);

  // Nonzero iff the dense run failed validation or had invalid input.
  return passed;
}
| cutlass/examples/59_ampere_gather_scatter_conv/ampere_gather_scatter_conv.cu/0 | {
"file_path": "cutlass/examples/59_ampere_gather_scatter_conv/ampere_gather_scatter_conv.cu",
"repo_id": "cutlass",
"token_count": 6729
} | 12 |
<jupyter_start><jupyter_text>Basic example of using the CUTLASS Python interface for Conv2dThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run Conv2d. [](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/03_basic_conv2d.ipynb) Prerequisites for running on ColabThis notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass<jupyter_output><empty_output><jupyter_text>General setupWe first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import torch
import random
import cutlass
# This controls whether the C++ Conv2d kernel declaration will be printed at each step.
# Set to `False` to omit this information.
print_module = True
# Input tensor: [N, H, W, C] under the channel-last layout
N, H, W, C = [32, 28, 28, 64]
# Weight tensor: [K, R, S, C] under the channel-last layout
K, R, S = [128, 3, 3]
# Stride, and padding
stride = (2, 2)
padding = (1, 1)
dilation = (1, 1)
# Compute the output size [N, P, Q, K]
N, P, Q, K = cutlass.Conv2d.output_size((N, H, W, C), (K, R, S, C), padding, stride, dilation)
dtype = torch.float16
type_A = torch.float16
type_B = torch.float16
type_C = torch.float16
type_D = torch.float16
torch.manual_seed(1234)
input = torch.ceil(
torch.empty(size=(N, C, H, W), dtype=type_A, device="cuda").uniform_(-4.5, 3.5)
).to(memory_format=torch.channels_last)
weight = torch.ceil(
torch.empty(size=(K, C, R, S), dtype=type_B, device="cuda").uniform_(-4.5, 3.5)
).to(memory_format=torch.channels_last)
tensor_C = torch.ceil(
torch.empty(size=(N, K, P, Q), dtype=type_B, device="cuda").uniform_(-4.5, 3.5)
).to(memory_format=torch.channels_last)
output = torch.zeros_like(tensor_C)
alpha = 1.0
beta = 0.0<jupyter_output><empty_output><jupyter_text>Declaring and running a Conv2d FpropWe first show you how to run a Conv2d in the forward propagation. To get started, one only needs to provide the tensors declared above to the `cutlass.op.Conv2dFprop` call. This sets up a default Conv2d fprop operation for the given device on which you are running. Assuming that we are runing on SM80, the default is a Conv2d that leverages FP16 Tensor Core operations.Calling `plan.run()` will generate the CUTLASS C++ kernel in question, compile it, and run it on the tensors we previously passed in. By setting `print_module` to `true`, the C++ code that is emitted is printed.<jupyter_code># Specifying `element_accumulator` is not required if it is the same as `element`
plan = cutlass.Conv2dFprop(element=dtype, element_accumulator=torch.float32)
plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>There are many other ways to construct a plan from `cutlass.op.Conv2dFprop` (e.g., by specifying the types of each operand, by providing representative tensors as input). For more details on these, see the documentation in the `cutlass.op.Conv2dFprop` constructor.We then compare the output to running the Conv2d using PyTorch. PyTorch use NCHW layout by default, so permutations are required.<jupyter_code>output_torch = alpha * torch.ops.aten.conv2d(
input, weight, stride=stride, padding=padding, dilation=dilation
) + beta * tensor_C
assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>Note that one could use the same kernel just declared for tensors provided by other frameworks beyond PyTorch, such as NumPy. Declaring and running Conv2d Dgrad and WgradThe Python interface also supports declaring and running backward kernels of Conv2d. To begin with, we construct the tensors for the gradient of input, output, and weight.<jupyter_code>grad_output = torch.ceil(
torch.empty(size=(N, K, P, Q), dtype=type_A, device="cuda").uniform_(-4.5, 3.5)
).to(memory_format=torch.channels_last)
grad_input = torch.zeros_like(input)
grad_weight = torch.zeros_like(weight)
tensor_C_dgrad = torch.ceil(
torch.empty(size=(N, C, H, W), dtype=type_A, device="cuda").uniform_(-4.5, 3.5)
).to(memory_format=torch.channels_last)
tensor_C_wgrad = torch.ceil(
torch.empty(size=(K, C, R, S), dtype=type_B, device="cuda").uniform_(-4.5, 3.5)
).to(memory_format=torch.channels_last)<jupyter_output><empty_output><jupyter_text>The script below gives a simple example of computing a data gradient via the CUTLASS Python interface and via PyTorch.<jupyter_code>plan_dgrad = cutlass.Conv2dDgrad(element=dtype, element_accumulator=torch.float32)
plan_dgrad.run(grad_output, weight, tensor_C_dgrad, grad_input, stride, padding, dilation, alpha, beta, print_module=print_module)
grad_input_torch = alpha * torch.nn.grad.conv2d_input(
(N, C, H, W),
weight, grad_output,
stride=stride, padding=padding
) + beta * tensor_C_dgrad
assert torch.equal(grad_input_torch, grad_input)<jupyter_output><empty_output><jupyter_text>The script below gives a simple example of computing a weight gradient via the CUTLASS Python interface and via PyTorch.<jupyter_code>plan_wgrad = cutlass.Conv2dWgrad(element=dtype, element_accumulator=torch.float32)
plan_wgrad.run(grad_output, input, tensor_C_wgrad, grad_weight, stride, padding, dilation, alpha, beta, print_module=print_module)
grad_weight_torch = alpha * torch.nn.grad.conv2d_weight(
input, (K, C, R, S), grad_output,
stride=stride, padding=padding
) + beta * tensor_C_wgrad
assert torch.equal(grad_weight_torch, grad_weight)<jupyter_output><empty_output><jupyter_text>Running non-default Conv2dsThe previous examples showed how it is simple to get starting running a default Conv2d kernel in CUTLASS. But, what do you do if you want a bit more control over the parameters to the Conv2d? CUTLASS Python interface exposes mutable parameters that can be set after the `plan` initialization. We summarize these in the table below.|Parameter|Description|| -- | -- ||`tile_description`|The threadblock tile size, warp count, software pipeline stages, and instruction shape||`iterator_algorithm`|The iterator algorithm used to access the source operands||`swizzling_stride`|The stride of the threadblock swizzling functor||`split-K`|Partitions the reduction dimension to different threadblocks| Tile DescriptionThe `tile_description` defines the tiling size of each threadblock, the warp count along each dimension of the tile, the software pipeline stages, and the instruction size. Under the hood, CUTLASS enumerates the different Conv2d configuration parameters for this kernel from the CUTLASS profiler. The code below shows how one can access the tile descriptions for the kernel (e.g., threadblock and warp shape).<jupyter_code>plan.opclass = "tensor_op"
tiles = plan.tile_descriptions()
print(f'{len(tiles)} tile descriptions returned')
num_print = 10
print(f'First {num_print} tile descriptions are:')
for td in tiles[:num_print]:
print(td)<jupyter_output><empty_output><jupyter_text>Next, we'll pick one of these configurations at random and compile and run it.<jupyter_code>random.seed(42)
idx = random.randint(0, len(tiles)-1)
td = tiles[idx]
print(f'Tile description {idx} is: {td}')
plan.tile_description = td
plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module)
assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>Besides tile descriptions enumerated by CUTLASS, the users can also explicitly set the `threadblockshape`, `warp_shape`, `stages`, `instruction_shape`, and `cluster_shape`. If the configuration is invalid, an exception will be raised at `plan.run()` and the detailed compilation error will be stored in `./cutlass_python_compilation_error.txt` for debugging.<jupyter_code>if plan.cc == 70:
plan.tile_description = {
"threadblock_shape": [64, 256, 32],
"warp_count": [1, 4, 1],
"stages": 2,
"instruction_shape": [8, 8, 4], # optional,
"cluster_shape": [1, 1, 1] # optional, only [1, 1, 1] is supported currently
}
elif plan.cc == 75:
plan.tile_description = {
"threadblock_shape": [128, 64, 32],
"warp_count": [2, 1, 1],
"stages": 2,
"instruction_shape": [16, 8, 8], # optional,
"cluster_shape": [1, 1, 1] # optional, only [1, 1, 1] is supported currently
}
elif plan.cc == 80:
plan.tile_description = {
"threadblock_shape": [128, 128, 64],
"warp_count": [2, 2, 1],
"stages": 4,
"instruction_shape": [16, 8, 16], # optional,
"cluster_shape": [1, 1, 1] # optional, only [1, 1, 1] is supported currently
}
elif plan.cc == 86:
plan.tile_description = {
"threadblock_shape": [128, 64, 64],
"warp_count": [2, 2, 1],
"stages": 3,
"instruction_shape": [16, 8, 16],
"cluster_shape": [1, 1, 1]
}
plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module)
assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>Iterator AlgorithmThe iterator algorithm describes how sources are loaded from memory. There are some iterator algorithms optimized for specific alignments and input/output channels that have better performance. The table below illustrates the available iterator algorithms.|Conv Kind | Iterator Algorithm | Description || -- | -- | -- ||Fprop | "analytic" | Functionally correct in all cases but lower performance || | "optimized" | Optimized for and requires `R <= 32`, `S<= 32`, and `C % alignment_input == 0`|| | "few_channels" | optimized for small `C` and requires `C % alignment_input == 0`|| | "fixed_channels" | optimized for small `C` and requires `C == alignment_input` ||Dgrad | "analytic" | Functionally correct in all cases but lower performance || | "optimized" | Optimzed for and require `R <= 32`, `S<= 32`, `K % alignment_grad_output == 0`, and `C % alignment_weight == 0`||Wgrad | "analytic" | Functionally correct in all cases but lower performance || | "optimized" | Optimized for and require `K % alignment_grad_output == 0`, and `C % alignment_input == 0`|By default, the Python interface will automatically propose a suitable iterator algorithm based on the input tensors in `plan.run()`. However, the user can also specify the desired iterator algorithm as follows<jupyter_code>plan.iterator_algorithm = "analytic"
plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module)
assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>If the iterator algorithm is invalid for the problem size in `plan.run()`, an exception will be raised. Swizzling StrideThe swizzling changes how the tile are mapped to threadblocks to improve the L2 Locality. Given a swizzling stride `N`, the threadblock `(tb_x, tb_y)` computes tile `(tb_x / N, tb_y * N + (tb_x % N))`. Currently, stride values of `1`, `2`, `4`, and `8` are supported for `fprop`, `wgrad`, and `1`, and `4` for `dgrad`. The swizzling stride can be set with:<jupyter_code>plan.swizzling_stride = 4
plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module)
assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>Split-KSplit-K is usually applied when the Conv2d has small spatial dimensions and large reduction dimension to ensure good utilization. It further partitions the reduction dimension to different threadblocks. The CUTLASS Python interface supports two types of split-K strategies: `Parallel`, and `Serial`. * `Parallel`: the partial results from different threadblocks are stored in a temporary buffer in the global memory. When the Conv2d is done, a separate reduction kernel is created and launched to reduce the partial results.* `Serial`: A semaphore is used to coordinate the order of different threadblocks adding their partial results to a given output tile. A separate kernel does not need to be launched for prforming the reduction.While all `fprop`, `dgrad`, and `wgrad` support split-K, here we use `wgrad` as an example.<jupyter_code># Parallel Split-K with 5 slices
grad_weight_parallel = torch.zeros_like(grad_weight)
plan_wgrad.run(
grad_output, input, tensor_C_wgrad, grad_weight_parallel,
stride, padding, dilation, alpha, beta, print_module=print_module, split_k=("parallel", 5))
assert torch.equal(grad_weight_torch, grad_weight_parallel)
# Serial Split-K with 3 slices
grad_weight_serial = torch.zeros_like(grad_weight)
plan_wgrad.run(
grad_output, input, tensor_C_wgrad, grad_weight_serial,
stride, padding, dilation, alpha, beta, print_module=print_module, split_k=("serial", 3))
assert torch.equal(grad_weight_torch, grad_weight_serial)<jupyter_output><empty_output> | cutlass/examples/python/03_basic_conv2d.ipynb/0 | {
"file_path": "cutlass/examples/python/03_basic_conv2d.ipynb",
"repo_id": "cutlass",
"token_count": 4484
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/copy.hpp>
#include <cute/atom/copy_traits.hpp>
#include <cute/atom/mma_atom.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/tensor.hpp>
namespace cute
{
// Primary template for a copy atom: the smallest partitionable unit of a copy
// operation, pairing a copy instruction's traits with the logical value type.
template <class... Args>
struct Copy_Atom;

// Convenience specialization: a bare CopyOperation is first wrapped in its
// Copy_Traits, then dispatched to the Copy_Traits-based specialization below.
template <class CopyOperation, class CopyInternalType>
struct Copy_Atom<CopyOperation, CopyInternalType> : Copy_Atom<Copy_Traits<CopyOperation>, CopyInternalType>
{};
// Copy_Atom specialized on a Copy_Traits: derives the value-granularity
// src/dst/ref layouts from the trait's bit layouts and exposes a tensor-level
// `call` interface that either executes the instruction or recurses.
template <class... Args, class CopyInternalType>
struct Copy_Atom<Copy_Traits<Args...>, CopyInternalType>
  : Copy_Traits<Args...>
{
  using Traits = Copy_Traits<Args...>;

  // Bit and Thr layouts from the Copy_Traits
  using ThrID        = typename Traits::ThrID;
  using BitLayoutSrc = typename Traits::SrcLayout;
  using BitLayoutDst = typename Traits::DstLayout;
  using BitLayoutRef = typename Traits::RefLayout;

  using ValType = CopyInternalType;

  // Recast the trait's bit-level layouts to ValType granularity.
  using ValLayoutSrc = decltype(recast_layout<uint1_t, ValType>(BitLayoutSrc{}));
  using ValLayoutDst = decltype(recast_layout<uint1_t, ValType>(BitLayoutDst{}));
  using ValLayoutRef = decltype(recast_layout<uint1_t, ValType>(BitLayoutRef{}));

  // The recast must keep mode-0 (the thread mode) equal to the number of threads.
  CUTE_STATIC_ASSERT_V(size<0>(ValLayoutSrc{}) == size(ThrID{}), "CopyOperation is not valid for Src of ValType.");
  CUTE_STATIC_ASSERT_V(size<0>(ValLayoutDst{}) == size(ThrID{}), "CopyOperation is not valid for Dst of ValType.");
  CUTE_STATIC_ASSERT_V(size<0>(ValLayoutRef{}) == size(ThrID{}), "CopyOperation is not valid for Ref of ValType.");

  // Number of ValType values each thread moves per instruction, on each side.
  static constexpr int NumValSrc = size<1>(ValLayoutSrc{});
  static constexpr int NumValDst = size<1>(ValLayoutDst{});

  // Additional Trait parameters/transformations: rebuilds the atom around a
  // Copy_Traits produced by Traits::with(args...).
  template <class... TraitsArgs>
  CUTE_HOST_DEVICE
  auto
  with(TraitsArgs&&... args) const {
    auto traits = Traits::with(static_cast<TraitsArgs&&>(args)...);
    return Copy_Atom<decltype(traits), CopyInternalType>{traits};
  }

  //
  // Tensor call interfaces
  //

  // Check and call instruction, or recurse
  template <class SEngine, class SLayout,
            class DEngine, class DLayout>
  CUTE_HOST_DEVICE
  void
  call(Tensor<SEngine,SLayout> const& src,
       Tensor<DEngine,DLayout>      & dst) const
  {
    static_assert(SLayout::rank == 1, "Expected rank-1 src tensor");
    static_assert(DLayout::rank == 1, "Expected rank-1 dst tensor");

    if constexpr (is_constant<NumValSrc, decltype(size(src))>::value ||
                  is_constant<NumValDst, decltype(size(dst))>::value) {
      // Dispatch to unpack to execute instruction
      return copy_unpack(*this, src, dst);
    } else
    if constexpr (is_tuple<decltype(shape(src))>::value &&
                  is_tuple<decltype(shape(dst))>::value) {
      // If the size of the src/dst doesn't match the instruction,
      // recurse this rank-1 layout by peeling off the mode
      //   ((A,B,C,...)) -> (A,B,C,...)
      return copy(*this, tensor<0>(src), tensor<0>(dst));
    } else {
      static_assert(dependent_false<SEngine>, "No instruction match and no recursion possible.");
    }
  }

  // Accept mutable temporaries (rvalue dst); forwards to the lvalue overload.
  template <class SEngine, class SLayout,
            class DEngine, class DLayout>
  CUTE_HOST_DEVICE
  void
  call(Tensor<SEngine,SLayout> const& src,
       Tensor<DEngine,DLayout>     && dst) const
  {
    return call(src, dst);
  }
};
//
// A tiling of copy atoms
//
// Forward declaration: the per-thread slice of a TiledCopy (defined later).
template <class TiledCopy, class ThrIdx>
struct ThrCopy;
// TiledCopy: a tiling of Copy_Atoms across threads and values.
// Inherits from Copy_Atom so the atom's traits/instruction remain accessible.
template <class Copy_Atom,
          class LayoutCopy_TV,  // (tid,vid) -> coord   [Need not be 2D...]
          class ShapeTiler_MN>  // coord space
struct TiledCopy : Copy_Atom
{
  // Layout information from the CopyAtom
  using AtomThrID     = typename Copy_Atom::ThrID;        // thrid -> thr_idx
  using AtomLayoutSrc = typename Copy_Atom::ValLayoutSrc; // (thr,val) -> offset
  using AtomLayoutDst = typename Copy_Atom::ValLayoutDst; // (thr,val) -> offset
  using AtomLayoutRef = typename Copy_Atom::ValLayoutRef; // (thr,val) -> offset

  using AtomNumThr = decltype(size<0>(AtomLayoutRef{}));
  using AtomNumVal = decltype(size<1>(AtomLayoutRef{}));

  // Layout information for the TiledCopy
  using Tiler_MN       = ShapeTiler_MN;
  using TiledLayout_TV = LayoutCopy_TV;
  using TiledNumThr    = decltype(size<0>(TiledLayout_TV{}));
  using TiledNumVal    = decltype(size<1>(TiledLayout_TV{}));

  // The tiling must use whole atoms: thread/value counts divide evenly.
  CUTE_STATIC_ASSERT_V(TiledNumThr{} % AtomNumThr{} == Int<0>{}, "TiledCopy uses too few thrs for selected CopyAtom");
  CUTE_STATIC_ASSERT_V(TiledNumVal{} % AtomNumVal{} == Int<0>{}, "TiledCopy uses too few vals for selected CopyAtom");
  // Tile a tensor or a layout from shape
  //   (M,N,...)
  // to shape
  //   ((ThrV,ThrX),FrgV,(RestM,RestN,...))
  // where
  //   ThrV:  The threads local to a COPY_ATOM Src.
  //   ThrX:  The threads tiled across COPY_ATOMs Src.
  //   FrgV:  The values local to a COPY_ATOM Src.
  //   RestM: The values tiled in M.
  //   RestN: The values tiled in N.
  // Static member: depends only on the TiledCopy type, not a thread index.
  template <class STensor>
  CUTE_HOST_DEVICE constexpr static
  auto
  tidfrg_S(STensor&& stensor)
  {
    CUTE_STATIC_ASSERT_V(rank(stensor) >= rank(Tiler_MN{}), "Rank of tensor to be partitioned too small.");

    // Tile the stensor and compute the (src-thr, src-val) -> (ref-thr, ref-val) layout
    return tile2thrfrg(zipped_divide(stensor,Tiler_MN{}), right_inverse(AtomLayoutRef{}).compose(AtomLayoutSrc{}));
  }
  // Tile a tensor or a layout from shape
  //   (M,N,...)
  // to shape
  //   ((ThrV,ThrX),FrgV,(RestM,RestN,...))
  // where
  //   ThrV:  The threads local to a COPY_ATOM Dst.
  //   ThrX:  The threads tiled across COPY_ATOMs Dst.
  //   FrgV:  The values local to a COPY_ATOM Dst.
  //   RestM: The values tiled in M.
  //   RestN: The values tiled in N.
  // Destination-side analogue of tidfrg_S, using AtomLayoutDst.
  template <class DTensor>
  CUTE_HOST_DEVICE constexpr static
  auto
  tidfrg_D(DTensor&& dtensor)
  {
    CUTE_STATIC_ASSERT_V(rank(dtensor) >= rank(Tiler_MN{}), "Rank of tensor to be partitioned too small.");

    // Tile the dtensor and compute the (dst-thr, dst-val) -> (ref-thr, ref-val) layout
    return tile2thrfrg(zipped_divide(dtensor,Tiler_MN{}), right_inverse(AtomLayoutRef{}).compose(AtomLayoutDst{}));
  }
  // Tile a tensor or a layout from shape
  //   ((TileM,TileN,...), (RestM,RestN,...))
  // to shape
  //   ((ThrV,ThrX),FrgV,(RestM,RestN,...))
  // Shared worker for tidfrg_S/tidfrg_D: `ref2trg` maps (ref-thr,ref-val)
  // coordinates to the target (src or dst) side's (thr,val) coordinates.
  template <class Tensor, class Ref2TrgLayout>
  CUTE_HOST_DEVICE constexpr static
  auto
  tile2thrfrg(Tensor&& tensor, Ref2TrgLayout const& ref2trg)
  {
    // Take the thrs/vals that the atom is interested in
    // NOTE: Assumes the AtomNumThr are contiguous and identity within TiledThrID
    auto atom_layout_TV = zipped_divide(TiledLayout_TV{}, make_shape(AtomNumThr{}, AtomNumVal{}));
    // ((atom_tid,atom_val),(rest_tid,rest_val)) -> (m,n)

    // Transform to the trg layout
    auto trg_layout_TV = atom_layout_TV.compose(ref2trg, _);
    // ((trg_tid,trg_val),(rest_tid,rest_val)) -> (m,n)

    // Transform the thrs mode from thrid to thr_idx
    // NOTE: Assumes the AtomNumThr are contiguous and identity within TiledThrID
    auto thrval2mn = coalesce(zip(trg_layout_TV), Shape<_1,Shape<_1,_1>>{});
    // ((trg_tid,rest_tid),(trg_val,rest_val)) -> (m,n)

    /// ==================

    // Transform the tile mode
    auto tv_tensor = tensor.compose(thrval2mn, _);
    // ((thrid,val),(RestM,RestN,...))

    // Unfold and return
    return tv_tensor(make_coord(_,_), _);
  }
// retile_S and retile_D assume they are working with the reference layout -- they are the same
  // Re-tile an already thread-partitioned tensor into Copy_Atom-sized value
  // fragments: (V,m,n,...) -> ((atom_vals,rest_vals),RM,RN,...).
  // Assumes the tensor was produced under the reference layout (retile_S and
  // retile_D are therefore the same operation).
  template <class Tensor>
  CUTE_HOST_DEVICE constexpr static
  auto
  retile(Tensor&& tensor)
  {
    constexpr int R = remove_cvref_t<Tensor>::rank;
    // Assert that AtomLayoutSrc|Dst is identity so we can skip the Ref transformation
    // Assume the first size<0>(tensor) elements are the first val_ids in TiledLayout_TV.
    // Then, we only need the shape+layout of those size<0>(tensor) elements in TiledLayout_TV
    //   and that shape is what we gather from the other modes of tensor
    auto V = size<0>(tensor);
    // (m,n) -> v_idx: the shape and order of the V values inside of TiledLayout_TV.
    // upcast by TiledNumThr*V strips the thread mode and all but the first V values.
    auto frg_layout_mn = upcast<TiledNumThr{} * V>(right_inverse(TiledLayout_TV{}).with_shape(shape(Tiler_MN{})));
    // (m,n) -> v_idx -- The shape and order of the V inside of TiledLayout_TV
    auto frg_layout_v = zipped_divide(logical_product(make_layout(V), right_inverse(frg_layout_mn)), make_layout(AtomNumVal{}));
    // (atom_vals,rest_vals) -> (v,m,n)
    /// =======
    // Tile the tensor for TileFrg
    auto t_tensor = zipped_divide(tensor, prepend(product_each(shape(frg_layout_mn)), V));
    // ((TileV,TileM,TileN,...),(1,RestM,RestN,...))
    // Transform the tile mode
    auto v_tensor = t_tensor.compose(frg_layout_v, _);
    // ((atom_vals,rest_vals),(1,RM,RN,...))
    // Unfold and return
    return v_tensor(_, append<R>(Int<0>{},_));
  }
  // Build the source-side (thr_idx,val_idx) -> (M,N) layout of this TiledCopy
  // by partitioning an identity reference layout over the tile.
  CUTE_HOST_DEVICE constexpr static
  auto
  get_layoutS_TV()
  {
    // (M,N) -> (M,N)  -- identity reference over the tile (trailing mode of size 1)
    auto ref_S = make_layout(make_shape(shape(Tiler_MN{}), Int<1>{}));
    // (thr_idx,val_idx) -> (M,N)  -- drop the trivial rest mode with Int<0>{}
    return tile2thrfrg(ref_S, right_inverse(AtomLayoutRef{}).compose(AtomLayoutSrc{}))(_,_,Int<0>{});
  }
  // Return the source-side layouts in (M,N)-major form:
  // a tuple of ((M,N) -> (thr_idx,val_idx), thrid -> thr_idx).
  CUTE_HOST_DEVICE constexpr static
  auto
  get_layoutS_MN()
  {
    // (thr_idx,val_idx) -> (M,N)
    auto layoutS_TV = get_layoutS_TV();
    // (M,K) -> (thr_idx,val_idx)  -- invert to coordinate-major form
    auto layoutS_MK = right_inverse(layoutS_TV).with_shape(shape(Tiler_MN{}));
    // athrid = (v,m,k) -> thr_idx  -- identity thread-id map here
    auto thrID_S = make_layout(size<0>(TiledLayout_TV{}));
    return cute::make_tuple(layoutS_MK, thrID_S);
  }
  // Build the destination-side (thr_idx,val_idx) -> (M,N) layout of this
  // TiledCopy. Mirror of get_layoutS_TV using the atom's Dst layout.
  CUTE_HOST_DEVICE constexpr static
  auto
  get_layoutD_TV()
  {
    // (M,N) -> (M,N)  -- identity reference over the tile
    auto ref_D = make_layout(make_shape(shape(Tiler_MN{}), Int<1>{}));
    // (thr_idx,val_idx) -> (M,N)
    return tile2thrfrg(ref_D, right_inverse(AtomLayoutRef{}).compose(AtomLayoutDst{}))(_,_,Int<0>{});
  }
  // Return the destination-side layouts in (M,N)-major form:
  // a tuple of ((M,N) -> (thr_idx,val_idx), thrid -> thr_idx).
  CUTE_HOST_DEVICE constexpr static
  auto
  get_layoutD_MN()
  {
    // (thr_idx,val_idx) -> (M,N)
    auto layoutD_TV = get_layoutD_TV();
    // (M,K) -> (thr_idx,val_idx)
    auto layoutD_MK = right_inverse(layoutD_TV).with_shape(shape(Tiler_MN{}));
    // athrid = (v,m,k) -> thr_idx  -- identity thread-id map here
    auto thrID_D = make_layout(size<0>(TiledLayout_TV{}));
    return cute::make_tuple(layoutD_MK, thrID_D);
  }
  // Return a ThrCopy view of this TiledCopy bound to the given thread index.
  // The ThrCopy provides per-thread partition_S/partition_D helpers.
  template <class ThrIdx,
            __CUTE_REQUIRES(is_integral<ThrIdx>::value)>
  CUTE_HOST_DEVICE static
  auto
  get_slice(ThrIdx const& thr_idx)
  {
    return ThrCopy<TiledCopy, ThrIdx>(thr_idx);
  }
template <class ThrIdx,
__CUTE_REQUIRES(is_integral<ThrIdx>::value)>
CUTE_HOST_DEVICE static
auto
get_thread_slice(ThrIdx const& thr_idx)
{
return get_slice(thr_idx);
}
};
// Per-thread view of a TiledCopy: holds a thread index and slices
// thread-partitioned tensors out of full tiles on behalf of that thread.
template <class TiledCopy, class ThrIdx>
struct ThrCopy
{
  ThrIdx thr_idx_;  // this thread's index within the TiledCopy

  CUTE_HOST_DEVICE
  ThrCopy(ThrIdx const& thr_idx) : thr_idx_(thr_idx) {}

  // Partition a full source tile and return this thread's fragment:
  // (M,N,...) -> (FrgV,(RestM,RestN,...)).
  template <class STensor>
  CUTE_HOST_DEVICE
  auto
  partition_S(STensor&& stensor) const {
    //static_assert(sizeof(typename remove_cvref_t<STensor>::value_type) == sizeof(typename TiledCopy::ValType),
    //              "Expected ValType for tiling SrcTensor.");
    auto thr_tensor = make_tensor(static_cast<STensor&&>(stensor).data(), TiledCopy::tidfrg_S(stensor.layout()));
    return thr_tensor(thr_idx_, _, repeat<rank_v<STensor>>(_));
  }

  // Partition a full destination tile and return this thread's fragment.
  template <class DTensor>
  CUTE_HOST_DEVICE
  auto
  partition_D(DTensor&& dtensor) const {
    //static_assert(sizeof(typename remove_cvref_t<DTensor>::value_type) == sizeof(typename TiledCopy::ValType),
    //              "Expected ValType for tiling DstTensor.");
    auto thr_tensor = make_tensor(static_cast<DTensor&&>(dtensor).data(), TiledCopy::tidfrg_D(dtensor.layout()));
    return thr_tensor(thr_idx_, _, repeat<rank_v<DTensor>>(_));
  }

  // Re-tile an already thread-partitioned source tensor into Copy_Atom-sized
  // fragments. Static: does not depend on thr_idx_.
  template <class STensor>
  CUTE_HOST_DEVICE static
  auto
  retile_S(STensor&& stensor) {
    // static_assert(sizeof(typename remove_cvref_t<STensor>::value_type) == sizeof(typename TiledCopy::ValType),
    //               "Expected ValType for tiling SrcTensor.");
    return make_tensor(static_cast<STensor&&>(stensor).data(), TiledCopy::retile(stensor.layout()));
  }

  // Re-tile an already thread-partitioned destination tensor into
  // Copy_Atom-sized fragments. Static: does not depend on thr_idx_.
  template <class DTensor>
  CUTE_HOST_DEVICE static
  auto
  retile_D(DTensor&& dtensor) {
    // static_assert(sizeof(typename remove_cvref_t<DTensor>::value_type) == sizeof(typename TiledCopy::ValType),
    //               "Expected ValType for tiling DstTensor.");
    return make_tensor(static_cast<DTensor&&>(dtensor).data(), TiledCopy::retile(dtensor.layout()));
  }
};
// Assemble a TiledCopy type from an atom plus a TV layout and tiler.
// The layout and tiler arguments carry type information only; their values
// are unused.
template <class... Args,
          class LayoutCopy_TV,
          class Tiler>
CUTE_HOST_DEVICE
auto
make_tiled_copy_impl(Copy_Atom<Args...> const& atom,
                     LayoutCopy_TV const&,
                     Tiler const&)
{
  using Result = TiledCopy<Copy_Atom<Args...>, LayoutCopy_TV, Tiler>;
  return Result{atom};
}
//
// These tile the Copy_Atom as a whole
//
// Tile copy_atom to match the A-operand partitioning of the given TiledMMA.
template <class... CArgs, class... MArgs>
CUTE_HOST_DEVICE
auto
make_tiled_copy_A(Copy_Atom<CArgs...> const& copy_atom,
                  TiledMMA<MArgs...> const& mma)
{
  // A is indexed by (M,K): modes 0 and 2 of the MMA tile.
  auto layout_tv = mma.get_layoutA_TV();
  auto tiler_mk  = make_shape(tile_size<0>(mma), tile_size<2>(mma));
  return make_tiled_copy_impl(copy_atom, layout_tv, tiler_mk);
}
// Tile copy_atom to match the B-operand partitioning of the given TiledMMA.
template <class... CArgs, class... MArgs>
CUTE_HOST_DEVICE
auto
make_tiled_copy_B(Copy_Atom<CArgs...> const& copy_atom,
                  TiledMMA<MArgs...> const& mma)
{
  // B is indexed by (N,K): modes 1 and 2 of the MMA tile.
  auto layout_tv = mma.get_layoutB_TV();
  auto tiler_nk  = make_shape(tile_size<1>(mma), tile_size<2>(mma));
  return make_tiled_copy_impl(copy_atom, layout_tv, tiler_nk);
}
// Tile copy_atom to match the C-operand partitioning of the given TiledMMA.
template <class... CArgs, class... MArgs>
CUTE_HOST_DEVICE
auto
make_tiled_copy_C(Copy_Atom<CArgs...> const& copy_atom,
                  TiledMMA<MArgs...> const& mma)
{
  // C is indexed by (M,N): modes 0 and 1 of the MMA tile.
  auto layout_tv = mma.get_layoutC_TV();
  auto tiler_mn  = make_shape(tile_size<0>(mma), tile_size<1>(mma));
  return make_tiled_copy_impl(copy_atom, layout_tv, tiler_mn);
}
// returns the smallest tiled copy that can retile LayoutC_TV
// for use with pipelined epilogues with subtiled stores
// Build the smallest TiledCopy whose source V-mode is exactly one Copy_Atom,
// while preserving the value ordering of the MMA's C layout. Used by
// pipelined epilogues that store the accumulator in atom-sized subtiles.
template <class... CArgs, class... MArgs>
CUTE_HOST_DEVICE
auto
make_tiled_copy_C_atom(Copy_Atom<CArgs...> const& copy_atom,
                       TiledMMA<MArgs...> const& mma)
{
  // Truncate the V-layout to just the Copy_Atom, keep the V-order
  auto layoutC_TV = mma.get_layoutC_TV();
  auto copy_V     = Int<Copy_Atom<CArgs...>::NumValSrc>{};
  CUTE_STATIC_ASSERT_V(copy_V <= size<1>(layoutC_TV));
  auto layout_TV  = composition(layoutC_TV, make_layout(make_shape(size<0>(layoutC_TV), copy_V)));
  // Recompute tiler and restride the TV layout for the new tiler

  // Tiler -- Find the active elements in the MMA tensor and generate a tiler to extract them
  // Convert to the awkward by-mode tiler to preserve the modes of the tiled MMA
  auto mma_tiler = make_shape(tile_size<0>(mma),tile_size<1>(mma));
  auto mma_zeros = repeat_like(mma_tiler, Int<0>{});
  // For each mode i, project the TV layout onto that mode alone (stride-1
  // there, stride-0 elsewhere) and filter out the inactive elements.
  auto tiler = transform(make_seq<rank(mma_tiler)>{}, [&](auto i) {
    return filter(composition(make_layout(mma_tiler, replace<i>(mma_zeros, Int<1>{})), layout_TV));
  });

  // Layout_TV -- Find the (tid,vid) -> tile coord transformation
  // Apply the tiler to a reference and transform the codomain
  // tile_coord -> mma_coord
  auto tile2mma = composition(make_layout(mma_tiler), tiler);

  // (tid,vid) -> tile_coord
  auto layout_tv = composition(left_inverse(tile2mma), layout_TV);

  return make_tiled_copy_impl(copy_atom, layout_tv, tiler);
}
/** Produce a TiledCopy from logical thread and values layouts.
* The thread and value layouts map coordinates to thr_idx and val_idx.
* The product of these layouts is taken to produce the TV layout and the Tiler.
* Useful when threads and values need very specific mappings onto coordinates
* in the target tensors.
*/
// Produce a TiledCopy from logical thread and value layouts (see the doc
// comment above). thr_layout maps (m,n) -> thr_idx; val_layout maps
// (m,n) -> val_idx. Their raked product defines the full tile.
template <class... Args,
          class ThrLayout,
          class ValLayout = Layout<_1>>
CUTE_HOST_DEVICE
auto
make_tiled_copy(Copy_Atom<Args...> const& copy_atom,
                ThrLayout     const& thr_layout = {},     // (m,n) -> thr_idx
                ValLayout     const& val_layout = {})     // (m,n) -> val_idx
{
  // Take the raked_products to compute the Layout_MN
  // (M,N) -> (thr_idx, val_idx)
  auto layout_mn = raked_product(thr_layout, val_layout);
  // (thr_idx, val_idx) -> (M,N)  -- invert, shaped as (num_threads, num_values)
  auto layout_tv = right_inverse(layout_mn).with_shape(make_shape(size(thr_layout), size(val_layout)));
  // Tiler for extracting relevant elements
  // (M,N) -> tensor coord
  auto tiler = product_each(shape(layout_mn));

#if 0
  print("thr_layout: "); print(thr_layout); print("\n");
  print("val_layout: "); print(val_layout); print("\n");
  print("layout_mn : "); print(layout_mn); print("\n");
  print("layout_tv : "); print(layout_tv); print("\n");
  print("tiler     : "); print(tiler); print("\n");
#endif

  return make_tiled_copy_impl(copy_atom, layout_tv, tiler);
}
/** Produce a TiledCopy from thread and value offset maps.
* The TV Layout maps threads and values to the codomain of the data_layout.
* It is verified that the intended codomain is valid within data_layout.
* Useful when threads and values don't care about owning specific coordinates, but
* care more about the vector-width and offsets between them.
*/
// Produce a TiledCopy from a (thr,val) -> data-address map and the data
// layout itself (see the doc comment above). Useful when threads care about
// vector width and offsets rather than specific coordinates.
template <class... Args, class AtomTVLayout, class DataLayout>
CUTE_HOST_DEVICE constexpr
auto
make_cotiled_copy(Copy_Atom<Args...> const& copy_atom,
                  AtomTVLayout const& atom_tv_layout,  // atom (thr,val) -> data addr
                  DataLayout   const& data_layout)     // coord          -> data addr    The target layout
{
  static_assert(is_static<AtomTVLayout>::value);
  static_assert(is_static<DataLayout>::value);

  // data addr -> data coord    Append 1:0 so off-the-ends get the stride-0
  auto inv_data_layout = make_layout(left_inverse(data_layout), Layout<_1,_0>{});

  // (tid,vid) -> data_coord
  auto layout_tv_data = composition(inv_data_layout, atom_tv_layout);

  // Check validity: every address the atom touches must exist in data_layout.
  CUTE_STATIC_ASSERT_V(coalesce(composition(data_layout, layout<1>(layout_tv_data))) == coalesce(layout<1>(atom_tv_layout)),
                       "The memory pointed to by AtomTVLayout does not exist in the DataLayout.");

#if 0
  if (thread0()) {
    print("data_layout   : "); print(data_layout); print("\n");
    print("atom_tv_layout: "); print(atom_tv_layout); print("\n");
    print("layout_tv_data: "); print(layout_tv_data); print("\n");
  }
#endif

  //
  // Tiler -- Find the active elements in the DATA tensor and generate a tiler to extract them
  //

  // Convert to the awkward by-mode tiler to preserve the modes of the tiled DATA
  auto flat_data_shape = product_each(shape(data_layout));
  auto flat_data_zeros = repeat<rank(flat_data_shape)>(Int<0>{});

  // For each mode i, project onto that mode alone and filter inactive elements.
  auto tiler = transform(make_seq<rank(flat_data_shape)>{}, [&](auto i) {
    return filter(composition(make_layout(flat_data_shape, replace<i>(flat_data_zeros, Int<1>{})), layout_tv_data));
  });

  //
  // Layout_TV -- Find the (tid,vid) -> tile coord transformation
  //

  // Apply the tiler to a reference and transform the codomain
  // tile_coord -> data_coord
  auto tile2data = composition(make_layout(flat_data_shape), tiler);

  // (tid,vid) -> tile_coord
  auto layout_tv = composition(left_inverse(tile2data), layout_tv_data);

#if 0
  if (thread0()) {
    print("tiler    : "); print(tiler); print("\n");
    print("tile2data: "); print(tile2data); print("\n");
    print("layout_tv: "); print(layout_tv); print("\n");
  }
#endif

  return make_tiled_copy_impl(copy_atom, layout_tv, tiler);
}
// Make a TiledCopy out of the copy_atom that matches the Src-Layout of tiled_copy
// Build a TiledCopy from copy_atom that reuses the Src-side layout and tiler
// of an existing tiled_copy.
template <class... Args,
          class TiledCopy>
CUTE_HOST_DEVICE
auto
make_tiled_copy_S(Copy_Atom<Args...> const& copy_atom,
                  TiledCopy          const& tiled_copy)
{
  using Tiler = typename TiledCopy::Tiler_MN;
  auto layout_tv = tiled_copy.get_layoutS_TV();
  return make_tiled_copy_impl(copy_atom, layout_tv, Tiler{});
}
// Make a TiledCopy out of the copy_atom that matches the Dst-Layout of tiled_copy
// Build a TiledCopy from copy_atom that reuses the Dst-side layout and tiler
// of an existing tiled_copy.
template <class... Args,
          class TiledCopy>
CUTE_HOST_DEVICE
auto
make_tiled_copy_D(Copy_Atom<Args...> const& copy_atom,
                  TiledCopy          const& tiled_copy)
{
  using Tiler = typename TiledCopy::Tiler_MN;
  auto layout_tv = tiled_copy.get_layoutD_TV();
  return make_tiled_copy_impl(copy_atom, layout_tv, Tiler{});
}
//
// Size
//
// The logical size of a TileCopy
// The logical size of a TiledCopy: size of the requested mode(s) of its tiler.
template <int... I, class... Args>
CUTE_HOST_DEVICE constexpr
auto
tile_size(TiledCopy<Args...> const&)
{
  using Tiler = typename TiledCopy<Args...>::Tiler_MN;
  return size<I...>(Tiler{});
}
// The number of threads involved in a TiledCopy
// The number of threads participating in a TiledCopy.
template <class... Args>
CUTE_HOST_DEVICE constexpr
auto
size(TiledCopy<Args...> const&)
{
  using NumThr = typename TiledCopy<Args...>::TiledNumThr;
  return NumThr{};
}
//
// Display utilities
//
// Pretty-print a Copy_Atom's thread/value layouts and value type (in bits).
template <class... Args, class T>
CUTE_HOST_DEVICE
void
print(Copy_Atom<Copy_Traits<Args...>, T> const&)
{
  using Atom = Copy_Atom<Copy_Traits<Args...>, T>;
  print("Copy_Atom\n");
  print("  ThrID:        "); print(typename Atom::ThrID{}); print("\n");
  print("  ValLayoutSrc: "); print(typename Atom::ValLayoutSrc{}); print("\n");
  print("  ValLayoutDst: "); print(typename Atom::ValLayoutDst{}); print("\n");
  print("  ValLayoutRef: "); print(typename Atom::ValLayoutRef{}); print("\n");
  print("  ValueType:    "); print(sizeof_bits<typename Atom::ValType>::value); print("b\n");
}
// Pretty-print a TiledCopy: its tiler, TV layout, and underlying atom.
// NOTE(review): `pad` is currently unused -- presumably reserved as an
// indentation prefix for nested printing; confirm before removing.
template <class Atom, class... Args>
CUTE_HOST_DEVICE
void
print(TiledCopy<Atom, Args...> const& copy, char const* pad = "")
{
  using Copy = TiledCopy<Atom, Args...>;
  print("TiledCopy\n");
  print("  Tiler_MN:       "); print(typename Copy::Tiler_MN{}); print("\n");
  print("  TiledLayout_TV: "); print(typename Copy::TiledLayout_TV{}); print("\n");
  print(static_cast<Atom const&>(copy));
}
// Pretty-print a ThrCopy: its thread index followed by the owning TiledCopy.
template <class TiledCopy, class ThrIdx>
CUTE_HOST_DEVICE
void
print(ThrCopy<TiledCopy, ThrIdx> const& thr_copy)
{
  print("ThrCopy\n");
  print("  ThrIdx: "); print(thr_copy.thr_idx_); print("\n");
  print(TiledCopy{});
}
// Emit a LaTeX/TikZ visualization of a TiledCopy's source and destination
// thread/value assignments (delegates to print_latex_copy below).
template <class... Args>
CUTE_HOST_DEVICE
auto
print_latex(TiledCopy<Args...> const& copy)
{
  auto [layoutS_MN, thrID_S] = copy.get_layoutS_MN();
  auto [layoutD_MN, thrID_D] = copy.get_layoutD_MN();

  print_latex_copy(layoutS_MN, thrID_S,
                   layoutD_MN, thrID_D);
}
// MNK Copy Layout to Latex TIKZ -- 8-value color coded by thread
// Render the Src and Dst (m,n) -> (thr,val) assignments of a copy as a LaTeX
// TikZ picture: one colored box per element (8-color palette cycled by
// thread), with S drawn at the origin and D offset to its right, each with
// row/column index labels.
//
// S, D : rank-2 layouts mapping (m,n) -> flattened (tid,vid)
// TS,TD: layouts mapping tid -> thr_idx
template <class LayoutS, class ThrIDS,
          class LayoutD, class ThrIDD>
CUTE_HOST_DEVICE
void
print_latex_copy(LayoutS const& S, ThrIDS const& TS,  // (m,n) -> (tid,vid)  and  tid -> thr_idx
                 LayoutD const& D, ThrIDD const& TD)  // (m,n) -> (tid,vid)  and  tid -> thr_idx
{
  CUTE_STATIC_ASSERT_V(rank(S) == Int<2>{});
  CUTE_STATIC_ASSERT_V(rank(D) == Int<2>{});

  assert(size<0>(S) == size<0>(D));
  assert(size<1>(S) == size<1>(D));

  char const* latex_header =
      "\\documentclass{standalone}\n"
      "\\usepackage{tikz}\n"
      "\\usetikzlibrary{external}\n"
      "\\tikzexternalize\n"
      "\\begin{document}\n"
      "\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center}]\n\n";
  char const* latex_footer =
      "\\end{tikzpicture}\n"
      "\\end{document}\n";

  char const* color_map[8] = {"{rgb,255:red,175;green,175;blue,255}",
                              "{rgb,255:red,175;green,255;blue,175}",
                              "{rgb,255:red,255;green,255;blue,175}",
                              "{rgb,255:red,255;green,175;blue,175}",
                              "{rgb,255:red,210;green,210;blue,255}",
                              "{rgb,255:red,210;green,255;blue,210}",
                              "{rgb,255:red,255;green,255;blue,210}",
                              "{rgb,255:red,255;green,210;blue,210}",};

  // Header comments record the layouts being drawn.
  printf("%% LayoutS: "); print(S);  printf("\n");
  printf("%% ThrIDS : "); print(TS); printf("\n");
  printf("%% LayoutD: "); print(D);  printf("\n");
  printf("%% ThrIDD : "); print(TD); printf("\n\n");
  // Pass the fixed strings through "%s" so a stray '%' can never be
  // interpreted as a conversion specifier (-Wformat-security).
  printf("%s", latex_header);

  // S starting at 0,0
  for (int i = 0; i < size<0>(S); ++i) {
    for (int j = 0; j < size<1>(S); ++j) {
      int thrid   = S(i,j) % size(TS);
      int val_idx = S(i,j) / size(TS);
      int thr_idx = TS(thrid);

      printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n",
             color_map[thr_idx % 8],
             i, j,
             thr_idx, val_idx);
    }
  }

  // D starting at 0,size<1>(S)+3
  for (int i = 0; i < size<0>(D); ++i) {
    for (int j = 0; j < size<1>(D); ++j) {
      int thrid   = D(i,j) % size(TD);
      int val_idx = D(i,j) / size(TD);
      int thr_idx = TD(thrid);

      printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n",
             color_map[thr_idx % 8],
             i, j + size<1>(S) + 3,
             thr_idx, val_idx);
    }
  }

  // S Labels
  for (int i = 0, j = -1; i < size<0>(S); ++i) {
    printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, i);
  }
  for (int j = 0, i = -1; j < size<1>(S); ++j) {
    printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, j);
  }
  // D Labels
  // Bound fixed: iterate D's rows (was size<0>(S)); the two are asserted
  // equal above, but that assert vanishes under NDEBUG.
  for (int i = 0, j = size<1>(D); i < size<0>(D); ++i) {
    printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j + size<1>(S) + 3, i);
  }
  for (int j = 0, i = -1; j < size<1>(D); ++j) {
    printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j + size<1>(S) + 3, j);
  }

  // Footer
  printf("%s", latex_footer);
}
} // end namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cute/atom/copy_traits_sm50.hpp>
#include <cute/atom/copy_traits_sm75.hpp>
#include <cute/atom/copy_traits_sm80.hpp>
#include <cute/atom/copy_traits_sm90.hpp>
// Config
#if (__CUDACC_VER_MAJOR__ >= 12)
# define CUTE_COPY_ATOM_TMA_SM90_ENABLED
#endif
#if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED)
#include <cute/atom/copy_traits_sm90_tma.hpp>
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cute/atom/copy_atom.hpp/0 | {
"file_path": "cutlass/include/cute/atom/copy_atom.hpp",
"repo_id": "cutlass",
"token_count": 11355
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/mma_sm90.hpp>
#include <cute/atom/mma_traits.hpp>
#include <cute/tensor.hpp>
namespace cute {
// Fence between the async destination accumulators of GMMA & source for their dependent use
template <class Engine, class Layout>
CUTE_HOST_DEVICE
void
warpgroup_fence_operand(Tensor<Engine, Layout>& frg) {
CUTE_STATIC_ASSERT(is_static<Layout>::value);
if constexpr (is_same_v<typename Engine::value_type, float>) {
auto f32_frg = recast<float>(frg);
CUTE_UNROLL
for (int i = 0; i < size(f32_frg); ++i) {
warpgroup_fence_operand(f32_frg(i));
}
}
else {
CUTE_STATIC_ASSERT(is_rmem<Engine>::value);
auto u32_frg = recast<uint32_t>(frg);
CUTE_UNROLL
for (int i = 0; i < size(u32_frg); ++i) {
warpgroup_fence_operand(u32_frg(i));
}
}
}
namespace GMMA {
///////////////////////////////////////////
// Common layouts for GMMA Shared Memory //
///////////////////////////////////////////
// M|N-major GMMA layouts in units of bits.
// Swizzle<B,4,3> selects the GMMA swizzle mode: B=0 interleaved, B=1 32B,
// B=2 64B, B=3 128B (matches GMMA::LayoutType below).
using Layout_MN_INTER_Atom_Bits  = ComposedLayout<Swizzle<0,4,3>, smem_ptr_flag, Layout<Shape< _128,_8>,Stride<_1, _128>>>;
using Layout_MN_SW32_Atom_Bits   = ComposedLayout<Swizzle<1,4,3>, smem_ptr_flag, Layout<Shape< _256,_8>,Stride<_1, _256>>>;
using Layout_MN_SW64_Atom_Bits   = ComposedLayout<Swizzle<2,4,3>, smem_ptr_flag, Layout<Shape< _512,_8>,Stride<_1, _512>>>;
using Layout_MN_SW128_Atom_Bits  = ComposedLayout<Swizzle<3,4,3>, smem_ptr_flag, Layout<Shape<_1024,_8>,Stride<_1,_1024>>>;

// K-major GMMA layouts in units of bits.
using Layout_K_INTER_Atom_Bits   = ComposedLayout<Swizzle<0,4,3>, smem_ptr_flag, Layout<Shape<_8, _128>,Stride< _128,_1>>>;
using Layout_K_SW32_Atom_Bits    = ComposedLayout<Swizzle<1,4,3>, smem_ptr_flag, Layout<Shape<_8, _256>,Stride< _256,_1>>>;
using Layout_K_SW64_Atom_Bits    = ComposedLayout<Swizzle<2,4,3>, smem_ptr_flag, Layout<Shape<_8, _512>,Stride< _512,_1>>>;
using Layout_K_SW128_Atom_Bits   = ComposedLayout<Swizzle<3,4,3>, smem_ptr_flag, Layout<Shape<_8,_1024>,Stride<_1024,_1>>>;

// M|N-major layouts in units of Type (bit layouts upcast by the element width).
template <class Type>
using Layout_MN_INTER_Atom = decltype(upcast<sizeof_bits<Type>::value>(Layout_MN_INTER_Atom_Bits{}));
template <class Type>
using Layout_MN_SW32_Atom  = decltype(upcast<sizeof_bits<Type>::value>(Layout_MN_SW32_Atom_Bits{}));
template <class Type>
using Layout_MN_SW64_Atom  = decltype(upcast<sizeof_bits<Type>::value>(Layout_MN_SW64_Atom_Bits{}));
template <class Type>
using Layout_MN_SW128_Atom = decltype(upcast<sizeof_bits<Type>::value>(Layout_MN_SW128_Atom_Bits{}));

// K-major layouts in units of Type.
template <class Type>
using Layout_K_INTER_Atom  = decltype(upcast<sizeof_bits<Type>::value>(Layout_K_INTER_Atom_Bits{}));
template <class Type>
using Layout_K_SW32_Atom   = decltype(upcast<sizeof_bits<Type>::value>(Layout_K_SW32_Atom_Bits{}));
template <class Type>
using Layout_K_SW64_Atom   = decltype(upcast<sizeof_bits<Type>::value>(Layout_K_SW64_Atom_Bits{}));
template <class Type>
using Layout_K_SW128_Atom  = decltype(upcast<sizeof_bits<Type>::value>(Layout_K_SW128_Atom_Bits{}));

// Convenience aliases that select MN- or K-major by the GMMA::Major parameter.
template <class Type, GMMA::Major tnsp>
using Layout_INTER_Atom = typename conditional<tnsp == GMMA::Major::MN,
                                               Layout_MN_INTER_Atom<Type>,
                                               Layout_K_INTER_Atom<Type>>::type;
template <class Type, GMMA::Major tnsp>
using Layout_SW32_Atom = typename conditional<tnsp == GMMA::Major::MN,
                                              Layout_MN_SW32_Atom<Type>,
                                              Layout_K_SW32_Atom<Type>>::type;
template <class Type, GMMA::Major tnsp>
using Layout_SW64_Atom = typename conditional<tnsp == GMMA::Major::MN,
                                              Layout_MN_SW64_Atom<Type>,
                                              Layout_K_SW64_Atom<Type>>::type;
template <class Type, GMMA::Major tnsp>
using Layout_SW128_Atom = typename conditional<tnsp == GMMA::Major::MN,
                                               Layout_MN_SW128_Atom<Type>,
                                               Layout_K_SW128_Atom<Type>>::type;
//
// Tensor (position-dependent swizzle) to LayoutType utility
//
// Map a uint128_t smem tensor's swizzle to the corresponding GMMA LayoutType.
// Only Swizzle<B,4,3> with B in [0,3] is representable; anything else is
// rejected at compile time.
template <class Engine, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
LayoutType
layout_type(Tensor<Engine, Layout<Shape,Stride>> const&)
{
  static_assert(is_same<uint128_t, typename Engine::value_type>::value,
                "Expected uint128_t type in LayoutType conversion.");

  using Swizzle = get_swizzle_t<Engine>;
  constexpr int B = Swizzle::num_bits;
  constexpr int M = Swizzle::num_base;
  constexpr int S = Swizzle::num_shft;

  static_assert(M == 4,           "Unsupported layout swizzle");
  static_assert(0 <= B && B <= 3, "Unsupported layout swizzle");
  static_assert(S == 3,           "Unsupported layout swizzle");

  // The number of swizzle bits B selects the layout type directly.
  return B == 1 ? LayoutType::B32
       : B == 2 ? LayoutType::B64
       : B == 3 ? LayoutType::B128
       :          LayoutType::INTERLEAVE;   // B == 0, guaranteed by the static_assert
}
///////////////////////////////////////////////////////////////////////////////
// Construction method for GMMA Descriptors
///////////////////////////////////////////////////////////////////////////////
/**
* ///////////////////////////////
* // make_gmma_desc<Major::MN> //
* ///////////////////////////////
* Each GmmaDescriptor Major-MN describes a canonical layout of the form
*
* LayoutType::INTERLEAVE : Swizzle<0,4,3> o smem_ptr o ((T,1,m),(8,k)):((1,T,SBO),(1T,LBO))
* LayoutType::B32 : Swizzle<1,4,3> o smem_ptr o ((T,2,m),(8,k)):((1,T,LBO),(2T,SBO))
* LayoutType::B64 : Swizzle<2,4,3> o smem_ptr o ((T,4,m),(8,k)):((1,T,LBO),(4T,SBO))
* LayoutType::B128 : Swizzle<3,4,3> o smem_ptr o ((T,8,m),(8,k)):((1,T,LBO),(8T,SBO))
*
* where
* T : sizeof(uint128_t) / sizeof(value_type)
* m : integer in [1,16] corresponding to GMMA shape
* k : integer in [1,32] corresponding to GMMA shape
* SBO: stride byte offset
* LBO: leading byte offset
*
* See GMMA::Layout_MN_XXX_Atom<value_type> for building canonical GmmaDescriptor Major-MN layouts.
* For example,
* auto smem_layout = tile_to_shape(Layout_MN_SW128_Atom<value_type>{}, Shape<_128,_64>{});
* is guaranteed to be accepted by make_gmma_desc<Major::MN> for appropriate value_type.
*
* //////////////////////////////
* // make_gmma_desc<Major::K> //
* //////////////////////////////
* Each GmmaDescriptor Major-K describes a canonical layout of the form
*
* LayoutType::INTERLEAVE : Swizzle<0,4,3> o smem_ptr o ((8,m),(T,2)):((1T,SBO),(1,LBO))
* LayoutType::B32 : Swizzle<1,4,3> o smem_ptr o ((8,m),(T,2)):((2T,SBO),(1, T ))
* LayoutType::B64 : Swizzle<2,4,3> o smem_ptr o ((8,m),(T,2)):((4T,SBO),(1, T ))
* LayoutType::B128 : Swizzle<3,4,3> o smem_ptr o ((8,m),(T,2)):((8T,SBO),(1, T ))
*
* See GMMA::Layout_K_XXX_Atom<value_type> for building canonical GmmaDescriptor Major-K layouts.
* For example,
* auto smem_layout = tile_to_shape(Layout_K_SW128_Atom<value_type>{}, Shape<_128,_64>{});
* is guaranteed to be accepted by make_gmma_desc<Major::K> for appropriate value_type.
*/
// Build a GmmaDescriptor for a rank-2 shared-memory tensor in canonical GMMA
// form (see the large comment block above for the accepted layouts).
// The tensor is recast to uint128_t so all offsets below are in 16B units,
// matching the descriptor's convention of dropping the 4 LSBs.
template <GMMA::Major MajorMode, class TEngine, class TLayout>
CUTE_HOST_DEVICE constexpr
GmmaDescriptor
make_gmma_desc(Tensor<TEngine,TLayout> const& tensor)
{
  static_assert(is_smem<TEngine>::value, "GMMA Descriptors can only be constructed on smem.");
  static_assert(TLayout::rank == 2, "GMMA Descriptors can only be constructed on rank-2 tensors.");
  using value_type = typename TEngine::value_type;

  Tensor u128_tensor = recast<uint128_t const>(tensor);

  // Result
  GmmaDescriptor desc;

  // Layout type -- deduced from the tensor's swizzle
  constexpr GMMA::LayoutType LAYOUT_TYPE = GMMA::layout_type(u128_tensor);
  desc.bitfield.layout_type_ = uint8_t(LAYOUT_TYPE);

  // Start address (4LSB not included)
  uint32_t start_address = cast_smem_ptr_to_uint(raw_pointer_cast(u128_tensor.data()));
  desc.bitfield.start_address_ = static_cast<uint16_t>(start_address >> 4);

  constexpr uint8_t base_offset = 0;
  desc.bitfield.base_offset_ = base_offset;

  // LayoutType meta: W = swizzle width in uint128_t units (1/2/4/8).
  constexpr int W = LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE ? 1 :
                    LAYOUT_TYPE == GMMA::LayoutType::B32        ? 2 :
                    LAYOUT_TYPE == GMMA::LayoutType::B64        ? 4 :
                    LAYOUT_TYPE == GMMA::LayoutType::B128       ? 8 : -1;

  if constexpr (MajorMode == GMMA::Major::MN)
  {
    /* In units of uint128_t, each GmmaDescriptor Major-MN describes a canonical layout of the form
     *
     * LayoutType::INTERLEAVE : Swizzle<0,4,3> o smem_ptr o ((1,n),(8,k)):((X,SBO),(1,LBO))
     * LayoutType::B32        : Swizzle<1,4,3> o smem_ptr o ((2,n),(8,k)):((1,LBO),(2,SBO))
     * LayoutType::B64        : Swizzle<2,4,3> o smem_ptr o ((4,n),(8,k)):((1,LBO),(4,SBO))
     * LayoutType::B128       : Swizzle<3,4,3> o smem_ptr o ((8,n),(8,k)):((1,LBO),(8,SBO))
     */
    static_assert(size<1>(u128_tensor) == Int<(256 / cute::sizeof_bits<value_type>::value)>{}, // K size
                  "Not a canonical GMMA_MN Layout: Expected K-size 256/sizeof_bits<T>.");

    // Construct the canonical GMMA T Layout with shape ((W,n),(8,2))
    Layout canonical_layout = logical_divide(layout(u128_tensor), make_tile(Layout<Int<W>,_1>{}, Layout<Int<8>,_1>{}));

    // Check ranks of canonical
    CUTE_STATIC_ASSERT_V(rank<0>(canonical_layout) == Int<2>{}, "Not a canonical GMMA_MN Layout: No flat offset mode");
    CUTE_STATIC_ASSERT_V(rank<1>(canonical_layout) == Int<2>{}, "Not a canonical GMMA_MN Layout: No flat offset mode");
    // Check canonical mode strides
    constexpr uint32_t stride_00 = stride<0,0>(canonical_layout);
    constexpr uint32_t expected_stride_00 = LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE ? stride<0,0>(canonical_layout) : 1;
    static_assert(stride_00 == expected_stride_00, "Not a canonical GMMA_MN Layout: Expected stride failure.");
    constexpr uint32_t stride_10 = stride<1,0>(canonical_layout);
    constexpr uint32_t expected_stride_10 = W;
    static_assert(stride_10 == expected_stride_10, "Not a canonical GMMA_MN Layout: Expected stride failure.");

    // stride dimension byte offset and leading dimension byte offset (4LSB not included == uint128_t units)
    constexpr uint32_t stride_01 = stride<0,1>(canonical_layout);
    constexpr uint32_t stride_11 = stride<1,1>(canonical_layout);

    // SBO/LBO swap roles for INTERLEAVE vs swizzled layouts (see table above).
    desc.bitfield.stride_byte_offset_  = (LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE) ? stride_01 : stride_11;
    desc.bitfield.leading_byte_offset_ = (LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE) ? stride_11 : stride_01;
  }
  else if constexpr (MajorMode == GMMA::Major::K)
  {
    /* In units of uint128_t, each GmmaDescriptor Major-K describes a canonical layout of the form
     *
     * LayoutType::INTERLEAVE : Swizzle<0,4,3> o smem_ptr o ((8,n),2):((1,SBO),LBO)
     * LayoutType::B32        : Swizzle<1,4,3> o smem_ptr o ((8,n),2):((2,SBO),1)
     * LayoutType::B64        : Swizzle<2,4,3> o smem_ptr o ((8,n),2):((4,SBO),1)
     * LayoutType::B128       : Swizzle<3,4,3> o smem_ptr o ((8,n),2):((8,SBO),1)
     */
    CUTE_STATIC_ASSERT_V(size<0>(u128_tensor) % Int<8>{} == Int<0>{},      // N|M size
                         "Not a canonical GMMA_K Layout: Expected MN-size multiple of 8.");
    CUTE_STATIC_ASSERT_V(size<1>(u128_tensor) == Int<2>{},                 // K   size
                         "Not a canonical GMMA_K Layout: Expected K-size 2 (in units of uint128_t).");

    // Construct the canonical GMMA N Layout with shape ((8,n),(2,1))
    Layout canonical_layout = logical_divide(layout(u128_tensor), make_tile(Layout<_8,_1>{}, Layout<_2,_1>{}));

    // Check ranks of canonical
    CUTE_STATIC_ASSERT_V(rank<0>(canonical_layout) == Int<2>{}, "Not a canonical GMMA_K Layout: No flat offset mode");
    CUTE_STATIC_ASSERT_V(rank<1>(canonical_layout) == Int<2>{}, "Not a canonical GMMA_K Layout: No flat offset mode");
    // Check canonical mode strides
    constexpr uint32_t stride_00 = stride<0,0>(canonical_layout);
    constexpr uint32_t expected_stride_00 = W;
    static_assert(stride_00 == expected_stride_00, "Not a canonical GMMA_K Layout: Expected stride failure.");
    constexpr uint32_t stride_10 = stride<1,0>(canonical_layout);
    constexpr uint32_t expected_stride_10 = (LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE) ? stride<1,0>(canonical_layout) : 1;
    static_assert(stride_10 == expected_stride_10, "Not a canonical GMMA_K Layout: Expected stride failure.");

    // stride dimension byte offset and leading dimension byte offset (4LSB not included == uint128_t units)
    constexpr uint32_t stride_01 = stride<0,1>(canonical_layout);

    desc.bitfield.stride_byte_offset_  = stride_01;
    desc.bitfield.leading_byte_offset_ = stride_10;
  } else {
    static_assert(MajorMode != GMMA::Major::MN && MajorMode != GMMA::Major::K, "Unrecognized MajorMode!");
  }

#if 0
  // DEBUG and SANITY
  assert((start_address & 0b0000001111) == 0); // Must be 16B aligned (4LSB are 0) no negotiation
  assert((start_address & 0b1110000000) == 0); // Assert base_offset is 0, generalize later
  if (thread0()) {
    print("smem_desc input tensor: "); print(tensor.data()); print(" o "); print(tensor.layout()); print("\n");
    print("smem_desc uint128_t tensor: "); print(u128_tensor.data()); print(" o "); print(u128_tensor.layout()); print("\n");
    //print("     desc canonical layout: "); print(canonical_layout); print("\n");
    print(desc);
  }
#endif

  return desc;
}
///////////////////////////////////////////////////////////////////////////////
// Higher level GMMA Descriptor utilities
///////////////////////////////////////////////////////////////////////////////
// Iterator-like wrapper around a single GmmaDescriptor, so a descriptor can be
// used as the "engine" of a cute Tensor. Note: desc_ is accessed directly by
// the raw_pointer_cast / recast_ptr customizations below, so its name is part
// of this type's interface.
struct DescriptorIterator
{
using reference = GmmaDescriptor;
using element_type = GmmaDescriptor;
using value_type = GmmaDescriptor;
GmmaDescriptor desc_;
// Dereference returns the GmmaDescriptor
CUTE_HOST_DEVICE constexpr
reference operator*() const { return desc_; }
// Advance and return a new GmmaDescriptor
template <class Index>
CUTE_HOST_DEVICE constexpr
reference operator[](Index const& i) const { return *(*this + i); }
// Return an advanced iterator. The arithmetic is performed on the raw 64-bit
// descriptor value, i.e. the offset lands in the low (start-address) bits --
// presumably offset is expressed in uint128_t units to match that encoding.
template <class Index>
CUTE_HOST_DEVICE constexpr
DescriptorIterator operator+(Index const& offset) const
{
return { GmmaDescriptor{desc_ + uint64_t(offset)} };
}
CUTE_HOST_DEVICE friend void
print(DescriptorIterator) { printf("GMMA::DescriptorIterator"); }
};
// raw_pointer_cast customization: "casting" a DescriptorIterator yields the
// descriptor value itself, not a memory address. The template parameter T is
// required by the customization-point signature but is unused here.
template <class T>
CUTE_HOST_DEVICE constexpr
GmmaDescriptor
raw_pointer_cast(DescriptorIterator const& ptr) {
return ptr.desc_;
}
// Recast a DescriptorIterator Tensor to uint64_t, it's RegType in mma_unpack.
// Only uint64_t is a legal target type; the iterator is returned unchanged
// because dereferencing already yields a GmmaDescriptor that decays to uint64_t.
template <class NewT>
CUTE_HOST_DEVICE constexpr
DescriptorIterator
recast_ptr(DescriptorIterator const& iter) {
static_assert(is_same<NewT, uint64_t>::value, "Can only cast GmmaDescriptorIterator to uint64_t.");
return iter; // Do nothing, it will still dereference to GmmaDescriptor and decay to uint64_t
}
// The GMMA Traits below have custom fragment type flags for their smem desc tensors.
// These flags specialize a MakeTensor customization point (see below) to build the
// descriptor-backed fragment; the Major mode parameter selects MN- vs K-major
// descriptor construction in make_gmma_desc.
template <GMMA::Major>
struct smem_desc : DescriptorIterator {};
} // end namespace GMMA
// Customization point for creating a GMMA::smem_desc Tensor.
// Given a shared-memory tensor, builds a Tensor whose engine is a
// DescriptorIterator seeded from mode 0 of the input (the core matrix), and
// whose layout is the input layout recast to uint128_t units with mode 0
// replaced by Layout<_1,_0> -- so iterating the remaining modes advances the
// descriptor's start address in 16-byte units while mode 0 contributes nothing.
template <GMMA::Major MajorMode>
struct MakeTensor<GMMA::smem_desc<MajorMode>>
{
template <class TEngine, class TLayout>
CUTE_HOST_DEVICE constexpr auto
operator()(Tensor<TEngine,TLayout> const& smem_tensor)
{
// GMMA descriptors can only describe shared-memory operands.
static_assert(is_smem<TEngine>::value, "Expected SMEM Tensor to construct a GMMA Desc Tensor");
return make_tensor(GMMA::DescriptorIterator{GMMA::make_gmma_desc<MajorMode>(tensor<0>(smem_tensor))},
replace<0>(recast<uint128_t const>(smem_tensor).layout(), Layout<_1,_0>{}))
;
}
};
///////////////////////////////////////////////////////////////////////////////
//////////////////////////// MMA_TRAITS ///////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
namespace GMMA {
// Accumulator (C/D) layouts: (thr,val) -> logical offset maps for one
// 128-thread warpgroup. Mode 0, Shape<_4,_8,_4>, enumerates the 128 threads;
// mode 1 enumerates the values each thread holds. Wider N tiles append a
// third value sub-mode with stride _512 (each extra 8-column group).
using CLayout_64x8 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8>>>;
using CLayout_64x16 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, _2>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x32 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, _4>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x64 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, _8>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x96 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, _12>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x128 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, _16>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x192 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, _24>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x256 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, _32>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
// Register source layout for 32-bit value types
using ALayout_64x8 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2, _2>>,
Stride<Stride< _64,_1,_16>,Stride< _8,_256>>>;
// Register source layout for 16-bit value types (same distribution as the
// 64x16 accumulator layout).
using ALayout_64x16 = CLayout_64x16;
// Register source layout for 8-bit value types
using ALayout_64x32 = Layout<Shape <Shape < _4,_8, _4>,Shape < _4,_2, _2>>,
Stride<Stride<_256,_1,_16>,Stride<_64,_8,_1024>>>;
// Shared memory source layouts for any value type. The thread mode has
// stride _0: the operand is addressed through the smem descriptor, so every
// thread of the warpgroup "sees" the full MxK tile rather than a private slice.
template <int M, int K>
using ABLayout = Layout<Shape <_128,Shape <Int<M>,Int<K>>>,
Stride< _0,Stride< _1,Int<M>>>>;
} // namespace GMMA
// MMA_Traits for SM90 warpgroup MMA: D(f16) = A(f16) * B(f16) + C(f16),
// tile shapes 64xNx16. _SS variants source both A and B from shared memory
// (FrgTypeA/FrgTypeB are smem descriptors); _RS variants source A from
// registers (no FrgTypeA override) and B from shared memory. ThrID spans the
// 128 threads of one warpgroup. accumulate_ defaults to ScaleOut::One
// (accumulate into C); presumably the framework resets it to Zero for the
// first MMA of an accumulation -- confirm against the mma_unpack machinery.
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_8,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 8, 16>;
using CLayout = GMMA::CLayout_64x8;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_8,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 8, 16>;
using CLayout = GMMA::CLayout_64x8;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_16,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 16, 16>;
using CLayout = GMMA::CLayout_64x16;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_16,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 16, 16>;
using CLayout = GMMA::CLayout_64x16;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_32,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 32, 16>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_32,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 32, 16>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_64,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 64, 16>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_64,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 64, 16>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_96,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 96, 16>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_96,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 96, 16>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_128,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<128, 16>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_128,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<128, 16>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_192,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<192, 16>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_192,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<192, 16>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_256,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<256, 16>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_256,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<256, 16>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits for SM90 warpgroup MMA: D(f32) = A(f16) * B(f16) + C(f32),
// tile shapes 64xNx16. Identical operand/accumulator distributions to the
// f16-accumulator traits; only ValTypeD/ValTypeC change to float. _SS sources
// A and B from shared memory via descriptors; _RS sources A from registers.
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_8,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 8, 16>;
using CLayout = GMMA::CLayout_64x8;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_8,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 8, 16>;
using CLayout = GMMA::CLayout_64x8;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_16,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 16, 16>;
using CLayout = GMMA::CLayout_64x16;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_16,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 16, 16>;
using CLayout = GMMA::CLayout_64x16;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_32,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 32, 16>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_32,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 32, 16>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_64,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 64, 16>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_64,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 64, 16>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_96,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 96, 16>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_96,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 96, 16>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_128,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<128, 16>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_128,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<128, 16>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_192,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<192, 16>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_192,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<192, 16>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_256,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<256, 16>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_256,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<256, 16>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits for SM90 warpgroup MMA: D(f32) = A(bf16) * B(bf16) + C(f32),
// tile shapes 64xNx16. Same layouts as the f16-operand traits (bf16 is also
// a 16-bit operand type, so ALayout_64x16 / ABLayout<M,16> apply unchanged);
// only the operand value types differ. _SS: A and B from smem; _RS: A from
// registers, B from smem.
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_8,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 8, 16>;
using CLayout = GMMA::CLayout_64x8;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_8,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 8, 16>;
using CLayout = GMMA::CLayout_64x8;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_16,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 16, 16>;
using CLayout = GMMA::CLayout_64x16;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_16,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 16, 16>;
using CLayout = GMMA::CLayout_64x16;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_32,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 32, 16>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_32,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 32, 16>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_64,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 64, 16>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_64,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 64, 16>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_96,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout< 96, 16>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_96,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout< 96, 16>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_128,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<128, 16>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_128,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<128, 16>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_192,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<192, 16>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_192,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<192, 16>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<tnspA>;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_256,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 16>;
using BLayout = GMMA::ABLayout<256, 16>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = bfloat16_t;
using ValTypeB = bfloat16_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<tnspB>;
using Shape_MNK = Shape<_64,_256,_16>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x16;
using BLayout = GMMA::ABLayout<256, 16>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits for SM90 warpgroup-wide (GMMA) TF32 x TF32 -> F32 MMA atoms of
// logical shape 64xNx8.  These exist only in the TN configuration: both
// operands are K-major, so the shared-memory descriptors are fixed to
// GMMA::Major::K and there are no transpose template parameters.  _SS reads
// A and B from shared memory; _RS reads A from registers instead.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x8_F32TF32TF32_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 8>;
  using BLayout = GMMA::ABLayout< 8, 8>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x8_F32TF32TF32_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x8;
  using BLayout = GMMA::ABLayout< 8, 8>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x8_F32TF32TF32_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 8>;
  using BLayout = GMMA::ABLayout< 16, 8>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x8_F32TF32TF32_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x8;
  using BLayout = GMMA::ABLayout< 16, 8>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x8_F32TF32TF32_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 8>;
  using BLayout = GMMA::ABLayout< 32, 8>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x8_F32TF32TF32_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x8;
  using BLayout = GMMA::ABLayout< 32, 8>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x8_F32TF32TF32_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 8>;
  using BLayout = GMMA::ABLayout< 64, 8>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x8_F32TF32TF32_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x8;
  using BLayout = GMMA::ABLayout< 64, 8>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x8_F32TF32TF32_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 8>;
  using BLayout = GMMA::ABLayout< 96, 8>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x8_F32TF32TF32_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x8;
  using BLayout = GMMA::ABLayout< 96, 8>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x8_F32TF32TF32_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 8>;
  using BLayout = GMMA::ABLayout<128, 8>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x8_F32TF32TF32_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x8;
  using BLayout = GMMA::ABLayout<128, 8>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x8_F32TF32TF32_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 8>;
  using BLayout = GMMA::ABLayout<192, 8>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x8_F32TF32TF32_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x8;
  using BLayout = GMMA::ABLayout<192, 8>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x8_F32TF32TF32_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 8>;
  using BLayout = GMMA::ABLayout<256, 8>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x8_F32TF32TF32_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = tfloat32_t;
  using ValTypeB = tfloat32_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_8>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x8;
  using BLayout = GMMA::ABLayout<256, 8>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits for SM90 warpgroup-wide (GMMA) S8 x S8 -> S32 integer MMA atoms
// of logical shape 64xNx32 (TN only: both operands K-major, descriptors fixed
// to GMMA::Major::K).  The eight _SS specializations (A and B from shared
// memory) come first, followed by the eight _RS specializations (A from
// registers).  These atoms take no ScaleIn parameters, so the
// specializations are fully concrete (template <>).
template <>
struct MMA_Traits<SM90_64x8x32_S32S8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x16x32_S32S8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x32x32_S32S8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x64x32_S32S8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x96x32_S32S8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x128x32_S32S8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x192x32_S32S8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x256x32_S32S8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x8x32_S32S8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x16x32_S32S8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x32x32_S32S8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x64x32_S32S8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x96x32_S32S8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x128x32_S32S8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x192x32_S32S8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x256x32_S32S8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits for SM90 warpgroup-wide (GMMA) S8 x U8 -> S32 mixed-signedness
// integer MMA atoms of logical shape 64xNx32 (TN only, K-major operands).
// Identical structure to the S8 x S8 family above except ValTypeB is
// uint8_t.  The eight _SS specializations come first, then the eight _RS
// specializations (A sourced from registers, so no FrgTypeA).
template <>
struct MMA_Traits<SM90_64x8x32_S32S8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x16x32_S32S8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x32x32_S32S8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x64x32_S32S8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x96x32_S32S8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x128x32_S32S8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x192x32_S32S8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x256x32_S32S8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x8x32_S32S8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x16x32_S32S8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x32x32_S32S8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x64x32_S32S8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x96x32_S32S8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x128x32_S32S8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x192x32_S32S8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x256x32_S32S8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = int8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// MMA_Traits for the SM90 GMMA u8 x s8 -> s32 "SS" atoms: both A and B are
// consumed through K-major shared-memory descriptors (FrgTypeA / FrgTypeB).
// Eight specializations covering 64xNx32 tiles, N = 8..256.  The first struct
// below is annotated member-by-member; the rest follow the identical pattern.
//
template <>
struct MMA_Traits<SM90_64x8x32_S32U8S8_SS_TN>
{
  using ValTypeD = int32_t;                          // D (result) element type
  using ValTypeA = uint8_t;                          // A operand element type
  using ValTypeB = int8_t;                           // B operand element type
  using ValTypeC = int32_t;                          // C (source accumulator) element type
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;  // A read via K-major smem descriptor
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;  // B read via K-major smem descriptor
  using Shape_MNK = Shape<_64,_8,_32>;               // MMA tile: M=64, N=8, K=32
  using ThrID = Layout<_128>;                        // 128 participating threads (one warpgroup)
  using ALayout = GMMA::ABLayout< 64, 32>;           // thread/value layout of A
  using BLayout = GMMA::ABLayout< 8, 32>;            // thread/value layout of B
  using CLayout = GMMA::CLayout_64x8;                // thread/value layout of C/D
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;  // accumulate into C by default
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x16x32_S32U8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x32x32_S32U8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x64x32_S32U8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x96x32_S32U8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x128x32_S32U8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x192x32_S32U8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x256x32_S32U8S8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// MMA_Traits for the SM90 GMMA u8 x s8 -> s32 "RS" atoms: A is sourced from
// registers (no FrgTypeA; register fragment layout ALayout_64x32), B through a
// K-major shared-memory descriptor.  Eight 64xNx32 tiles, N = 8..256; otherwise
// identical in structure to the SS family above.
//
template <>
struct MMA_Traits<SM90_64x8x32_S32U8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x16x32_S32U8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x32x32_S32U8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x64x32_S32U8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x96x32_S32U8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x128x32_S32U8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x192x32_S32U8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x256x32_S32U8S8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = int8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// MMA_Traits for the SM90 GMMA u8 x u8 -> s32 "SS" atoms: both operands read
// through K-major shared-memory descriptors.  Eight 64xNx32 tiles, N = 8..256.
//
template <>
struct MMA_Traits<SM90_64x8x32_S32U8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x16x32_S32U8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x32x32_S32U8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x64x32_S32U8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x96x32_S32U8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x128x32_S32U8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x192x32_S32U8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x256x32_S32U8U8_SS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// MMA_Traits for the SM90 GMMA u8 x u8 -> s32 "RS" atoms: A from registers
// (ALayout_64x32, no FrgTypeA), B via K-major smem descriptor.  Eight 64xNx32
// tiles, N = 8..256.
//
template <>
struct MMA_Traits<SM90_64x8x32_S32U8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x16x32_S32U8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x32x32_S32U8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x64x32_S32U8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x96x32_S32U8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x128x32_S32U8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x192x32_S32U8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM90_64x256x32_S32U8U8_RS_TN>
{
  using ValTypeD = int32_t;
  using ValTypeA = uint8_t;
  using ValTypeB = uint8_t;
  using ValTypeC = int32_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// MMA_Traits for the SM90 GMMA FP8 (e4m3 x e4m3) atoms with 64x8x32 and 64x16x32
// tiles.  Four variants per tile: {half_t, float} accumulator x {SS, RS} A-operand
// sourcing.  SS traits carry smem descriptors for both operands (FrgTypeA and
// FrgTypeB); RS traits drop FrgTypeA and use the register fragment layout
// ALayout_64x32.  Unlike the integer atoms above, these are parameterized by
// GMMA::ScaleIn scaleA/scaleB, forwarded unchanged to the underlying atom type.
//
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// FP8 (e4m3 x e4m3) MMA_Traits continued: 64x32x32 and 64x64x32 tiles,
// {half_t, float} accumulator x {SS, RS} A sourcing, parameterized by
// GMMA::ScaleIn scaleA/scaleB.
//
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// FP8 (e4m3 x e4m3) MMA_Traits continued: 64x96x32 tiles (all four variants)
// and the first two 64x128x32 variants (F16 SS/RS); same structure and
// scaleA/scaleB parameterization as the FP8 groups above.
//
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_128,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<128, 32>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_128,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<128, 32>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_192,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<192, 32>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_192,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<192, 32>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_192,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<192, 32>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_192,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<192, 32>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_256,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<256, 32>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_256,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<256, 32>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_256,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<256, 32>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e4m3_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_256,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<256, 32>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits specializations for SM90 warpgroup MMA (GMMA) FP8 atoms with mixed
// operand types A = float_e4m3_t and B = float_e5m2_t, K-major ("TN"), K = 32, M = 64,
// covering N = 8, 16, 32, 64, 96, 128, 192, 256.
// For each N the four variants appear in order: F16 SS, F16 RS, F32 SS, F32 RS.
//   * F16 / F32 selects the accumulator type: ValTypeD / ValTypeC = half_t or float.
//   * SS variants carry both FrgTypeA and FrgTypeB shared-memory descriptors and use
//     ALayout = GMMA::ABLayout<64,32>; RS variants omit FrgTypeA and use
//     GMMA::ALayout_64x32 (A register-sourced per the _RS_ naming — confirm at the atom).
//   * ThrID = Layout<_128>: one full 128-thread warpgroup per atom.
//   * accumulate_ defaults to GMMA::ScaleOut::One (runtime-modifiable scale-out flag).
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<  8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<  8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<  8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<  8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e4m3_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits specializations for SM90 warpgroup MMA (GMMA) FP8 atoms with mixed
// operand types A = float_e5m2_t and B = float_e4m3_t, K-major ("TN"), K = 32, M = 64.
// This section covers N = 8 and N = 16 here (the series continues beyond this region),
// with the same variant ordering per N: F16 SS, F16 RS, F32 SS, F32 RS.
//   * F16 / F32 selects the accumulator type: ValTypeD / ValTypeC = half_t or float.
//   * SS variants carry FrgTypeA and FrgTypeB shared-memory descriptors and use
//     ALayout = GMMA::ABLayout<64,32>; RS variants omit FrgTypeA and use
//     GMMA::ALayout_64x32 (A register-sourced per the _RS_ naming — confirm at the atom).
//   * ThrID = Layout<_128>: one full 128-thread warpgroup per atom.
//   * accumulate_ defaults to GMMA::ScaleOut::One (runtime-modifiable scale-out flag).
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<  8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<  8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<  8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<  8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID   = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_32,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout< 32, 32>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_32,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout< 32, 32>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_32,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout< 32, 32>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_32,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout< 32, 32>;
using CLayout = GMMA::CLayout_64x32;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_64,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout< 64, 32>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_64,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout< 64, 32>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_64,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout< 64, 32>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e4m3_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_64,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout< 64, 32>;
using CLayout = GMMA::CLayout_64x64;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits for SM90 GMMA with mixed FP8 operands (A = float_e5m2_t,
// B = float_e4m3_t), wide tiles: 64xNx32 for N in {96,128,192,256}.
// Same SS/RS pattern as the narrower tiles: SS carries a shared-memory
// descriptor for both operands; RS omits FrgTypeA (A presumably comes from
// registers — confirm against GMMA docs) and uses GMMA::ALayout_64x32.
// accumulate_ defaults to GMMA::ScaleOut::One.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_96,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 96, 32>;
  using CLayout = GMMA::CLayout_64x96;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_128,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<128, 32>;
  using CLayout = GMMA::CLayout_64x128;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_192,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<192, 32>;
  using CLayout = GMMA::CLayout_64x192;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e4m3_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_256,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout<256, 32>;
  using CLayout = GMMA::CLayout_64x256;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA_Traits for SM90 GMMA with uniform FP8 operands (A = B = float_e5m2_t),
// tile shapes 64xNx32 for N in {8,16,32,64}. Structure mirrors the
// E5M2E4M3 specializations above: F16/F32 selects half_t/float accumulators;
// SS variants describe both operands with K-major shared-memory descriptors,
// RS variants omit FrgTypeA (A presumably register-resident — confirm against
// GMMA docs) and use GMMA::ALayout_64x32. accumulate_ defaults to
// GMMA::ScaleOut::One.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x8x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_8,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 8, 32>;
  using CLayout = GMMA::CLayout_64x8;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x16x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_16,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 16, 32>;
  using CLayout = GMMA::CLayout_64x16;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x32x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_32,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 32, 32>;
  using CLayout = GMMA::CLayout_64x32;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = half_t;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = half_t;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ABLayout< 64, 32>;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x64x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>>
{
  using ValTypeD = float;
  using ValTypeA = float_e5m2_t;
  using ValTypeB = float_e5m2_t;
  using ValTypeC = float;
  using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
  using Shape_MNK = Shape<_64,_64,_32>;
  using ThrID = Layout<_128>;
  using ALayout = GMMA::ALayout_64x32;
  using BLayout = GMMA::ABLayout< 64, 32>;
  using CLayout = GMMA::CLayout_64x64;
  GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_96,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout< 96, 32>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_96,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout< 96, 32>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_96,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout< 96, 32>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x96x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_96,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout< 96, 32>;
using CLayout = GMMA::CLayout_64x96;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_128,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<128, 32>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_128,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<128, 32>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x128x32 tile, e5m2 x e5m2 -> f32 accumulators; 128 threads.
// SS variant: both A and B are K-major shared-memory descriptors.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_128,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<128, 32>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x128x32 tile, e5m2 x e5m2 -> f32 accumulators; 128 threads.
// RS variant: only B is a smem descriptor; A uses the register layout ALayout_64x32.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x128x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_128,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<128, 32>;
using CLayout = GMMA::CLayout_64x128;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x192x32 tile, e5m2 x e5m2 -> f16 accumulators; 128 threads.
// SS variant: both A and B are K-major shared-memory descriptors.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_192,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<192, 32>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x192x32 tile, e5m2 x e5m2 -> f16 accumulators; 128 threads.
// RS variant: only B is a smem descriptor; A uses the register layout ALayout_64x32.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_192,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<192, 32>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x192x32 tile, e5m2 x e5m2 -> f32 accumulators; 128 threads.
// SS variant: both A and B are K-major shared-memory descriptors.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_192,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<192, 32>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x192x32 tile, e5m2 x e5m2 -> f32 accumulators; 128 threads.
// RS variant: only B is a smem descriptor; A uses the register layout ALayout_64x32.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x192x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_192,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<192, 32>;
using CLayout = GMMA::CLayout_64x192;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x256x32 tile, e5m2 x e5m2 -> f16 accumulators; 128 threads.
// SS variant: both A and B are K-major shared-memory descriptors.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = half_t;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_256,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<256, 32>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x256x32 tile, e5m2 x e5m2 -> f16 accumulators; 128 threads.
// RS variant: only B is a smem descriptor; A uses the register layout ALayout_64x32.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>>
{
using ValTypeD = half_t;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = half_t;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_256,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<256, 32>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x256x32 tile, e5m2 x e5m2 -> f32 accumulators; 128 threads.
// SS variant: both A and B are K-major shared-memory descriptors.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = float;
using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_256,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ABLayout< 64, 32>;
using BLayout = GMMA::ABLayout<256, 32>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 GMMA traits: 64x256x32 tile, e5m2 x e5m2 -> f32 accumulators; 128 threads.
// RS variant: only B is a smem descriptor; A uses the register layout ALayout_64x32.
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB>
struct MMA_Traits<SM90_64x256x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>>
{
using ValTypeD = float;
using ValTypeA = float_e5m2_t;
using ValTypeB = float_e5m2_t;
using ValTypeC = float;
using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>;
using Shape_MNK = Shape<_64,_256,_32>;
using ThrID = Layout<_128>;
using ALayout = GMMA::ALayout_64x32;
using BLayout = GMMA::ABLayout<256, 32>;
using CLayout = GMMA::CLayout_64x256;
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cute
| cutlass/include/cute/atom/mma_traits_sm90_gmma.hpp/0 | {
"file_path": "cutlass/include/cute/atom/mma_traits_sm90_gmma.hpp",
"repo_id": "cutlass",
"token_count": 73368
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/integral_constant.hpp>
namespace cute
{
using CUTE_STL_NAMESPACE::integer_sequence;
using CUTE_STL_NAMESPACE::make_integer_sequence;
namespace detail {
// detail::range_impl: given integer_sequence<T, 0, 1, ..., N-1>, shifts every
// element by Begin, yielding integer_sequence<T, Begin, ..., Begin+N-1>.
template <class T, class S, T Begin>
struct range_impl;
template <class T, T... N, T Begin>
struct range_impl<T, integer_sequence<T, N...>, Begin> {
using type = integer_sequence<T, N+Begin...>;
};
// detail::reverse_impl: reverses an integer_sequence by mapping each element i
// to (sizeof...(N)-1) - i; make_integer_sequence<T,N> becomes <N-1, ..., 1, 0>.
template <class S>
struct reverse_impl;
template <class T, T... N>
struct reverse_impl<integer_sequence<T, N...>> {
using type = integer_sequence<T, sizeof...(N)-1-N...>;
};
} // end namespace detail
// make_integer_range<T, Begin, End>: integer_sequence<T, Begin, ..., End-1>.
// The (End-Begin > 0) guard clamps the length to zero for empty/inverted ranges.
template <class T, T Begin, T End>
using make_integer_range = typename detail::range_impl<
T,
make_integer_sequence<T, (End-Begin > 0) ? (End-Begin) : 0>,
Begin>::type;
// make_integer_sequence_reverse<T, N>: integer_sequence<T, N-1, ..., 1, 0>.
template <class T, T N>
using make_integer_sequence_reverse = typename detail::reverse_impl<
make_integer_sequence<T, N>>::type;
//
// Common aliases
//
// int_sequence: integer_sequence specialized to int, plus forward ("make_"),
// reverse ("make_..._rsequence"), and sub-range ("make_..._range") builders.
template <int... Ints>
using int_sequence = integer_sequence<int, Ints...>;
template <int N>
using make_int_sequence = make_integer_sequence<int, N>;
template <int N>
using make_int_rsequence = make_integer_sequence_reverse<int, N>;
template <int Begin, int End>
using make_int_range = make_integer_range<int, Begin, End>;
// index_sequence: same family of builders over size_t.
template <size_t... Ints>
using index_sequence = integer_sequence<size_t, Ints...>;
template <size_t N>
using make_index_sequence = make_integer_sequence<size_t, N>;
template <size_t N>
using make_index_rsequence = make_integer_sequence_reverse<size_t, N>;
template <size_t Begin, size_t End>
using make_index_range = make_integer_range<size_t, Begin, End>;
//
// Shortcuts
//
// Terse spellings of the int-based aliases, used pervasively in CuTe.
template <int... Ints>
using seq = int_sequence<Ints...>;
template <int N>
using make_seq = make_int_sequence<N>;
template <int N>
using make_rseq = make_int_rsequence<N>;
template <int Min, int Max>
using make_range = make_int_range<Min, Max>;
// Sequence of indices 0..size-1 (forward / reverse) for a cute::tuple-like type.
template <class Tuple>
using tuple_seq = make_seq<tuple_size<remove_cvref_t<Tuple>>::value>;
template <class Tuple>
using tuple_rseq = make_rseq<tuple_size<remove_cvref_t<Tuple>>::value>;
//
// Specialize cute::tuple-traits for std::integer_sequence
//
// tuple_size of an integer_sequence is the number of elements in its pack,
// letting sequences participate in the cute::tuple protocol.
template <class T, T... Ints>
struct tuple_size<integer_sequence<T, Ints...>>
: cute::integral_constant<size_t, sizeof...(Ints)>
{};
// tuple_element of an integer_sequence: the I-th value of the pack, exposed as
// an integral_constant type. The pack is materialized into a constexpr array
// so it can be subscripted by I at compile time.
template <size_t I, class T, T... Is>
struct tuple_element<I, integer_sequence<T, Is...>>
{
constexpr static T idx[sizeof...(Is)] = {Is...};
using type = cute::integral_constant<T, idx[I]>;
};
// get<I>(seq): returns the I-th element of an integer_sequence as a
// default-constructed integral_constant (the value lives in the type, so the
// returned object is empty). Out-of-range I is rejected at compile time.
template <size_t I, class T, T... Ints>
CUTE_HOST_DEVICE constexpr
tuple_element_t<I, integer_sequence<T, Ints...>>
get(integer_sequence<T, Ints...>) {
static_assert(I < sizeof...(Ints), "Index out of range");
return {};
}
} // end namespace cute
| cutlass/include/cute/numeric/integer_sequence.hpp/0 | {
"file_path": "cutlass/include/cute/numeric/integer_sequence.hpp",
"repo_id": "cutlass",
"token_count": 1516
} | 16 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/**
* \file
* \brief Debugging and logging functionality
*/
#include <cuda_runtime_api.h>
#include <cute/config.hpp>
namespace cute
{
/******************************************************************************
* Debug and logging macros
******************************************************************************/
/**
* Formats and prints the given message to stdout
*/
#if !defined(CUTE_LOG)
#  if !defined(__CUDA_ARCH__)
#    define CUTE_LOG(format, ...) printf(format, __VA_ARGS__)
#  else
     // Device-side variant prefixes every message with the block/thread
     // coordinates of the printing thread.
     // NOTE: no trailing ';' so the macro expands to a plain expression,
     // exactly like the host variant above (callers supply their own ';').
#    define CUTE_LOG(format, ...)                               \
       printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \
              blockIdx.x, blockIdx.y, blockIdx.z,               \
              threadIdx.x, threadIdx.y, threadIdx.z,            \
              __VA_ARGS__)
#  endif
#endif
/**
* Formats and prints the given message to stdout only if DEBUG is defined
*/
#if !defined(CUTE_LOG_DEBUG)
# ifdef DEBUG
# define CUTE_LOG_DEBUG(format, ...) CUTE_LOG(format, __VA_ARGS__)
# else
# define CUTE_LOG_DEBUG(format, ...)
# endif
#endif
/**
* \brief Perror macro with exit
*/
#if !defined(CUTE_ERROR_EXIT)
# define CUTE_ERROR_EXIT(e) \
do { \
cudaError_t code = (e); \
if (code != cudaSuccess) { \
fprintf(stderr, "<%s:%d> %s:\n %s: %s\n", \
__FILE__, __LINE__, #e, \
cudaGetErrorName(code), cudaGetErrorString(code)); \
fflush(stderr); \
exit(1); \
} \
} while (0)
#endif
#if !defined(CUTE_CHECK_LAST)
# define CUTE_CHECK_LAST() CUTE_ERROR_EXIT(cudaPeekAtLastError()); CUTE_ERROR_EXIT(cudaDeviceSynchronize())
#endif
#if !defined(CUTE_CHECK_ERROR)
# define CUTE_CHECK_ERROR(e) CUTE_ERROR_EXIT(e)
#endif
// A dummy function that uses compilation failure to print a type
// Overload for when no instance is available: print_type<SomeType>().
// The static_assert condition depends on T, so it fires only on instantiation;
// the resulting compiler error spells out the types in T...
template <class... T>
CUTE_HOST_DEVICE void
print_type() {
static_assert(sizeof...(T) < 0, "Printing type T.");
}
// Overload deducing T... from arguments: print_type(x, y) reports their types.
template <class... T>
CUTE_HOST_DEVICE void
print_type(T&&...) {
static_assert(sizeof...(T) < 0, "Printing type T.");
}
//
// Device-specific helpers
//
// e.g.
// if (thread0()) print(...);
// if (block0()) print(...);
// if (thread(42)) print(...);
// True iff this thread's CTA has linearized block index `bid`.
// On the host there is no block identity, so it is always true.
CUTE_HOST_DEVICE
bool
block(int bid)
{
#if defined(__CUDA_ARCH__)
int const linear_bid = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
return linear_bid == bid;
#else
return true;
#endif
}
// True iff this is thread `tid` (linearized within the CTA) of block `bid`.
// On the host there is no thread identity, so it is always true.
CUTE_HOST_DEVICE
bool
thread(int tid, int bid)
{
#if defined(__CUDA_ARCH__)
int const linear_tid = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
return (linear_tid == tid) && block(bid);
#else
return true;
#endif
}
// Convenience overload: true iff this is thread `tid` of block 0.
CUTE_HOST_DEVICE
bool
thread(int tid)
{
return thread(tid,0);
}
// True iff this is thread 0 of block 0 -- the canonical "print once" guard.
CUTE_HOST_DEVICE
bool
thread0()
{
return thread(0,0);
}
// True iff this thread belongs to block 0 (any thread within it).
CUTE_HOST_DEVICE
bool
block0()
{
return block(0);
}
} // end namespace cute
| cutlass/include/cute/util/debug.hpp/0 | {
"file_path": "cutlass/include/cute/util/debug.hpp",
"repo_id": "cutlass",
"token_count": 2060
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))
#define CUTLASS_ARCH_MMA_SM80_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
#define CUTLASS_ARCH_MMA_SM80_ENABLED
#if (__CUDA_ARCH__ <= 900)
#define CUTLASS_ARCH_MMA_B1_AND_SM80_ENABLED
#endif
#if (__CUDA_ARCH__ <= 890)
#define CUTLASS_ARCH_MMA_B1_XOR_SM80_ENABLED
#endif
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - Float BF16, FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - F32 = bf16 * bf16 + F32
/// Thread-level wrapper over the SM80 mma.sync.aligned.m16n8k8 PTX
/// instruction: D(f32) = A(bf16, row-major) * B(bf16, column-major) + C(f32),
/// executed cooperatively by one warp of 32 threads.
template <>
struct Mma<
  gemm::GemmShape<16, 8, 8>,
  32,
  bfloat16_t,
  layout::RowMajor,
  bfloat16_t,
  layout::ColumnMajor,
  float,
  layout::RowMajor,
  OpMultiplyAdd> {

  using Shape = gemm::GemmShape<16, 8, 8>;

  using ElementA = bfloat16_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<bfloat16_t, 4>;

  using ElementB = bfloat16_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<bfloat16_t, 2>;

  using ElementC = float;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<float, 4>;

  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes d = a * b + c. The bf16 fragments are reinterpreted as packed
  /// 32-bit registers (two bf16 values per register) for the PTX operands.
  CUTLASS_HOST_DEVICE
  void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
                  FragmentC const &c) const {

#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)

    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);

    float const *C = reinterpret_cast<float const *>(&c);
    float *D = reinterpret_cast<float *>(&d);

    // `volatile` matches every other MMA specialization in this file and
    // keeps the compiler from reordering or eliding the instruction.
    asm volatile(
        "mma.sync.aligned.m16n8k8.row.col.f32.bf16.bf16.f32 "
        "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n"
        : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
        :
          "r"(A[0]), "r"(A[1]),
          "r"(B[0]),
          "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
    );

#else
    CUTLASS_UNUSED(d);
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_NOT_IMPLEMENTED();
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1684 - Float TF32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = tf32 * tf32 + F32
// Thread-level MMA: D(f32) = A(tf32, row-major) * B(tf32, column-major) + C(f32),
// m16n8k4, one warp of 32 threads, lowered to the SM80 mma.sync PTX instruction.
template <>
struct Mma<
gemm::GemmShape<16, 8, 4>,
32,
tfloat32_t,
layout::RowMajor,
tfloat32_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 4>;
using ElementA = tfloat32_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<tfloat32_t, 2>;
using ElementB = tfloat32_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<tfloat32_t, 1>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
// Computes d = a * b + c; tf32 fragments are passed as 32-bit registers.
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k4.row.col.f32.tf32.tf32.f32 {%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]),
"r"(B[0]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - Float TF32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = tf32 * tf32 + F32
// Thread-level MMA: D(f32) = A(tf32, row-major) * B(tf32, column-major) + C(f32),
// m16n8k8, one warp of 32 threads, lowered to the SM80 mma.sync PTX instruction.
template <>
struct Mma<gemm::GemmShape<16, 8, 8>, 32, tfloat32_t, layout::RowMajor,
tfloat32_t, layout::ColumnMajor, float, layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = tfloat32_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<tfloat32_t, 4>;
using ElementB = tfloat32_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<tfloat32_t, 2>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
// Computes d = a * b + c; tf32 fragments are passed as 32-bit registers.
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16816
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
// Thread-level MMA: D(f16) = A(f16, row-major) * B(f16, column-major) + C(f16),
// m16n8k16, one warp of 32 threads (SM80 mma.sync).
template <>
struct Mma<
gemm::GemmShape<16, 8, 16>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
// With f16 accumulators, the 4-element half fragments C/D pack into two
// 32-bit registers each (two halves per register).
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
uint32_t const *C = reinterpret_cast<uint32_t const *>(&c);
uint32_t *D = reinterpret_cast<uint32_t *>(&d);
asm volatile("mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 {%0,%1}, {%2,%3,%4,%5}, {%6,%7}, {%8,%9};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = bf16 * bf16 + F32
// Thread-level MMA: D(f32) = A(bf16, row-major) * B(bf16, column-major) + C(f32),
// m16n8k16, one warp of 32 threads (SM80 mma.sync).
template <>
struct Mma<
gemm::GemmShape<16, 8, 16>,
32,
bfloat16_t,
layout::RowMajor,
bfloat16_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = bfloat16_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<bfloat16_t, 8>;
using ElementB = bfloat16_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<bfloat16_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
// Thread-level MMA: D(f32) = A(f16, row-major) * B(f16, column-major) + C(f32),
// m16n8k16, one warp of 32 threads (SM80 mma.sync).
template <>
struct Mma<
gemm::GemmShape<16, 8, 16>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, "
"{%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 884 - F64
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F64 = F64 * F64 + F64
// Thread-level MMA: D(f64) = A(f64, row-major) * B(f64, column-major) + C(f64),
// m8n8k4, one warp of 32 threads (SM80 mma.sync). Each thread holds a single
// f64 element of A and B and two f64 accumulator elements.
template <>
struct Mma<
gemm::GemmShape<8,8,4>,
32,
double,
layout::RowMajor,
double,
layout::ColumnMajor,
double,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,4>;
using ElementA = double;
using LayoutA = layout::RowMajor;
using FragmentA = Array<double, 1>;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<double, 1>;
using ElementC = double;
using LayoutC = layout::RowMajor;
using FragmentC = Array<double, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
// Computes d = a * b + c.
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
double const & A = reinterpret_cast<double const &>(a);
double const & B = reinterpret_cast<double const &>(b);
double const *C = reinterpret_cast<double const *>(&c);
double *D = reinterpret_cast<double *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.col.f64.f64.f64.f64 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=d"(D[0]), "=d"(D[1])
: "d"(A), "d"(B), "d"(C[0]), "d"(C[1]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16816 - S8 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
// Thread-level MMA: S32 = S8(row-major) * S8(column-major) + S32, m16n8k16,
// one warp of 32 threads (SM80 mma.sync).
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 8>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
// The 8 int8 A values pack into two 32-bit registers; the 4 int8 B values
// pack into one.
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 {%0,%1,%2,%3}, {%4,%5}, {%6}, "
"{%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
// NOTE(review): the integer specializations use assert(0) here whereas the
// floating-point ones use CUTLASS_NOT_IMPLEMENTED() -- presumably equivalent
// in effect; consider unifying. TODO confirm before changing.
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
/// Thread-level MMA: S32 = U8(row-major) * S8(column-major) + S32, m16n8k16,
/// one warp of 32 threads, lowered to the SM80 mma.sync PTX instruction.
template <>
struct Mma<
  gemm::GemmShape<16,8,16>,
  32,
  uint8_t,
  layout::RowMajor,
  int8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {

  using Shape = gemm::GemmShape<16,8,16>;

  using ElementA = uint8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<uint8_t, 8>;

  using ElementB = int8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<int8_t, 4>;

  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;

  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes d = a * b + c. The 8 uint8 A values pack into two 32-bit
  /// registers; the 4 int8 B values pack into one.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,
    FragmentA const &a,
    FragmentB const &b,
    FragmentC const &c
  ) const {

#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)

    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const &B = reinterpret_cast<uint32_t const &>(b);

    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);

    asm volatile(
        "mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32 {%0,%1,%2,%3}, {%4,%5}, {%6}, "
        "{%7,%8,%9,%10};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
          "r"(C[3]));

#else
    // Silence unused-parameter warnings on targets without SM80 MMA support,
    // matching the convention of the sibling specializations in this file.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
/// Thread-level MMA: S32 = S8(row-major) * U8(column-major) + S32, m16n8k16,
/// one warp of 32 threads, lowered to the SM80 mma.sync PTX instruction.
template <>
struct Mma<
  gemm::GemmShape<16,8,16>,
  32,
  int8_t,
  layout::RowMajor,
  uint8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {

  using Shape = gemm::GemmShape<16,8,16>;

  using ElementA = int8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<int8_t, 8>;

  using ElementB = uint8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<uint8_t, 4>;

  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;

  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes d = a * b + c. The 8 int8 A values pack into two 32-bit
  /// registers; the 4 uint8 B values pack into one.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,
    FragmentA const &a,
    FragmentB const &b,
    FragmentC const &c
  ) const {

#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)

    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const &B = reinterpret_cast<uint32_t const &>(b);

    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);

    asm volatile(
        "mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32 {%0,%1,%2,%3}, {%4,%5}, {%6}, "
        "{%7,%8,%9,%10};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
          "r"(C[3]));

#else
    // Silence unused-parameter warnings on targets without SM80 MMA support,
    // matching the convention of the sibling specializations in this file.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
  gemm::GemmShape<16,8,16>,
  32,
  uint8_t,
  layout::RowMajor,
  uint8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16,8,16>;
  using ElementA = uint8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<uint8_t, 8>;
  using ElementB = uint8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<uint8_t, 4>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k16 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (8x u8, packed into two 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (4x u8, packed into one 32-bit register)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32 {%0,%1,%2,%3}, {%4,%5}, {%6}, "
        "{%7,%8,%9,%10};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
          "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16816 - S8 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32 (saturating)
template <>
struct Mma<
  gemm::GemmShape<16,8,16>,
  32,
  int8_t,
  layout::RowMajor,
  int8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAddSaturate> {
  using Shape = gemm::GemmShape<16,8,16>;
  using ElementA = int8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<int8_t, 8>;
  using ElementB = int8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<int8_t, 4>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C with saturating (.satfinite) accumulation via mma.sync.m16n8k16.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (8x s8, packed into two 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (4x s8, packed into one 32-bit register)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5}, "
        "{%6}, {%7,%8,%9,%10};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
          "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32 (saturating)
template <>
struct Mma<
  gemm::GemmShape<16,8,16>,
  32,
  uint8_t,
  layout::RowMajor,
  int8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAddSaturate> {
  using Shape = gemm::GemmShape<16,8,16>;
  using ElementA = uint8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<uint8_t, 8>;
  using ElementB = int8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<int8_t, 4>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C with saturating (.satfinite) accumulation via mma.sync.m16n8k16.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (8x u8, packed into two 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (4x s8, packed into one 32-bit register)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5}, "
        "{%6}, {%7,%8,%9,%10};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
          "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32 (saturating)
template <>
struct Mma<
  gemm::GemmShape<16,8,16>,
  32,
  int8_t,
  layout::RowMajor,
  uint8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAddSaturate> {
  using Shape = gemm::GemmShape<16,8,16>;
  using ElementA = int8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<int8_t, 8>;
  using ElementB = uint8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<uint8_t, 4>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C with saturating (.satfinite) accumulation via mma.sync.m16n8k16.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (8x s8, packed into two 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (4x u8, packed into one 32-bit register)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5}, "
        "{%6}, {%7,%8,%9,%10};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
          "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32 (saturating)
template <>
struct Mma<
  gemm::GemmShape<16,8,16>,
  32,
  uint8_t,
  layout::RowMajor,
  uint8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAddSaturate> {
  using Shape = gemm::GemmShape<16,8,16>;
  using ElementA = uint8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<uint8_t, 8>;
  using ElementB = uint8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<uint8_t, 4>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C with saturating (.satfinite) accumulation via mma.sync.m16n8k16.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (8x u8, packed into two 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (4x u8, packed into one 32-bit register)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5}, "
        "{%6}, {%7,%8,%9,%10};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
          "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16832 - S8 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
  gemm::GemmShape<16,8,32>,
  32,
  int8_t,
  layout::RowMajor,
  int8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16,8,32>;
  using ElementA = int8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<int8_t, 16>;
  using ElementB = int8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<int8_t, 8>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k32 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (16x s8, packed into four 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (8x s8, packed into two 32-bit registers)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
        "{%8,%9}, {%10,%11,%12,%13};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
          "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
  gemm::GemmShape<16,8,32>,
  32,
  uint8_t,
  layout::RowMajor,
  int8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16,8,32>;
  using ElementA = uint8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<uint8_t, 16>;
  using ElementB = int8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<int8_t, 8>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k32 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (16x u8, packed into four 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (8x s8, packed into two 32-bit registers)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
        "{%8,%9}, {%10,%11,%12,%13};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
          "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
  gemm::GemmShape<16,8,32>,
  32,
  int8_t,
  layout::RowMajor,
  uint8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16,8,32>;
  using ElementA = int8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<int8_t, 16>;
  using ElementB = uint8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<uint8_t, 8>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k32 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (16x s8, packed into four 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (8x u8, packed into two 32-bit registers)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
        "{%8,%9}, {%10,%11,%12,%13};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
          "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
  gemm::GemmShape<16,8,32>,
  32,
  uint8_t,
  layout::RowMajor,
  uint8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16,8,32>;
  using ElementA = uint8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<uint8_t, 16>;
  using ElementB = uint8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<uint8_t, 8>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k32 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (16x u8, packed into four 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (8x u8, packed into two 32-bit registers)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
        "{%8,%9}, {%10,%11,%12,%13};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
          "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16832 - S8 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const * A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const * B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32 (saturating)
template <>
struct Mma<
  gemm::GemmShape<16,8,32>,
  32,
  uint8_t,
  layout::RowMajor,
  int8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAddSaturate> {
  using Shape = gemm::GemmShape<16,8,32>;
  using ElementA = uint8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<uint8_t, 16>;
  using ElementB = int8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<int8_t, 8>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C with saturating (.satfinite) accumulation via mma.sync.m16n8k32.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (16x u8, packed into four 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (8x s8, packed into two 32-bit registers)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, "
        "{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
          "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32 (saturating)
template <>
struct Mma<
  gemm::GemmShape<16,8,32>,
  32,
  uint8_t,
  layout::RowMajor,
  uint8_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAddSaturate> {
  using Shape = gemm::GemmShape<16,8,32>;
  using ElementA = uint8_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<uint8_t, 16>;
  using ElementB = uint8_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<uint8_t, 8>;
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
  using ArchTag = arch::Sm80;

  /// Computes D = A * B + C with saturating (.satfinite) accumulation via mma.sync.m16n8k32.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment (16x u8, packed into four 32-bit registers)
    FragmentB const &b,  ///< B operand fragment (8x u8, packed into two 32-bit registers)
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
    // View the packed fragments as the 32-bit registers the PTX instruction operates on.
    uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
    uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
    int const *C = reinterpret_cast<int const *>(&c);
    int *D = reinterpret_cast<int *>(&d);
    asm volatile(
        "mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, "
        "{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
        : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
        : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
          "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
    // Mark operands as used on architectures without this instruction
    // (consistent with sibling specializations in this file), then trap.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16864 - S4 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
  gemm::GemmShape<16, 8, 64>,
  32,
  cutlass::int4b_t,
  layout::RowMajor,
  cutlass::int4b_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16, 8, 64>;
  using ElementA = cutlass::int4b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::int4b_t, 32>;   ///< 32x s4 packed into four 32-bit registers
  using ElementB = cutlass::int4b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::int4b_t, 16>;   ///< 16x s4 packed into two 32-bit registers
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;
  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k64 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment
    FragmentB const &b,  ///< B operand fragment
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
  // View the packed fragments as the 32-bit registers the PTX instruction operates on.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);
  asm volatile(
      "mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
      "{%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
  // Instruction unavailable on this architecture: consume operands and trap.
  CUTLASS_UNUSED(a);
  CUTLASS_UNUSED(b);
  CUTLASS_UNUSED(c);
  CUTLASS_UNUSED(d);
  assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
  gemm::GemmShape<16, 8, 64>,
  32,
  cutlass::uint4b_t,
  layout::RowMajor,
  cutlass::int4b_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16, 8, 64>;
  using ElementA = cutlass::uint4b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::uint4b_t, 32>;  ///< 32x u4 packed into four 32-bit registers
  using ElementB = cutlass::int4b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::int4b_t, 16>;   ///< 16x s4 packed into two 32-bit registers
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;
  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k64 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment
    FragmentB const &b,  ///< B operand fragment
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
  // View the packed fragments as the 32-bit registers the PTX instruction operates on.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);
  asm volatile(
      "mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
      "{%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
  // Instruction unavailable on this architecture: consume operands and trap.
  CUTLASS_UNUSED(a);
  CUTLASS_UNUSED(b);
  CUTLASS_UNUSED(c);
  CUTLASS_UNUSED(d);
  assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
  gemm::GemmShape<16, 8, 64>,
  32,
  cutlass::int4b_t,
  layout::RowMajor,
  cutlass::uint4b_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16, 8, 64>;
  using ElementA = cutlass::int4b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::int4b_t, 32>;   ///< 32x s4 packed into four 32-bit registers
  using ElementB = cutlass::uint4b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::uint4b_t, 16>;  ///< 16x u4 packed into two 32-bit registers
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;
  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k64 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment
    FragmentB const &b,  ///< B operand fragment
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
  // View the packed fragments as the 32-bit registers the PTX instruction operates on.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);
  asm volatile(
      "mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
      "{%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
  // Instruction unavailable on this architecture: consume operands and trap.
  CUTLASS_UNUSED(a);
  CUTLASS_UNUSED(b);
  CUTLASS_UNUSED(c);
  CUTLASS_UNUSED(d);
  assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
  gemm::GemmShape<16, 8, 64>,
  32,
  cutlass::uint4b_t,
  layout::RowMajor,
  cutlass::uint4b_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {
  using Shape = gemm::GemmShape<16, 8, 64>;
  using ElementA = cutlass::uint4b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::uint4b_t, 32>;  ///< 32x u4 packed into four 32-bit registers
  using ElementB = cutlass::uint4b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::uint4b_t, 16>;  ///< 16x u4 packed into two 32-bit registers
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;
  /// Computes D = A * B + C using the SM80 mma.sync.aligned.m16n8k64 tensor core instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment
    FragmentB const &b,  ///< B operand fragment
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
  // View the packed fragments as the 32-bit registers the PTX instruction operates on.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);
  asm volatile(
      "mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
      "{%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
  // Instruction unavailable on this architecture: consume operands and trap.
  CUTLASS_UNUSED(a);
  CUTLASS_UNUSED(b);
  CUTLASS_UNUSED(c);
  CUTLASS_UNUSED(d);
  assert(0);
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16864 - S4 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const * A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const * B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32 (saturating)
template <>
struct Mma<
  gemm::GemmShape<16, 8, 64>,
  32,
  cutlass::uint4b_t,
  layout::RowMajor,
  cutlass::int4b_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAddSaturate> {
  using Shape = gemm::GemmShape<16, 8, 64>;
  using ElementA = cutlass::uint4b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::uint4b_t, 32>;  ///< 32x u4 packed into four 32-bit registers
  using ElementB = cutlass::int4b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::int4b_t, 16>;   ///< 16x s4 packed into two 32-bit registers
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
  using ArchTag = arch::Sm80;
  /// Computes D = A * B + C with saturating (.satfinite) accumulation via mma.sync.m16n8k64.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment
    FragmentB const &b,  ///< B operand fragment
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
  // View the packed fragments as the 32-bit registers the PTX instruction operates on.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);
  asm volatile(
      "mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32.satfinite {%0,%1,%2,%3}, "
      "{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
  // Instruction unavailable on this architecture: consume operands and trap.
  CUTLASS_UNUSED(a);
  CUTLASS_UNUSED(b);
  CUTLASS_UNUSED(c);
  CUTLASS_UNUSED(d);
  assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32 (saturating)
template <>
struct Mma<
  gemm::GemmShape<16, 8, 64>,
  32,
  cutlass::uint4b_t,
  layout::RowMajor,
  cutlass::uint4b_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAddSaturate> {
  using Shape = gemm::GemmShape<16, 8, 64>;
  using ElementA = cutlass::uint4b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::uint4b_t, 32>;  ///< 32x u4 packed into four 32-bit registers
  using ElementB = cutlass::uint4b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::uint4b_t, 16>;  ///< 16x u4 packed into two 32-bit registers
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
  using ArchTag = arch::Sm80;
  /// Computes D = A * B + C with saturating (.satfinite) accumulation via mma.sync.m16n8k64.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment
    FragmentB const &b,  ///< B operand fragment
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
  // View the packed fragments as the 32-bit registers the PTX instruction operates on.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);
  asm volatile(
      "mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32.satfinite {%0,%1,%2,%3}, "
      "{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
  // Instruction unavailable on this architecture: consume operands and trap.
  CUTLASS_UNUSED(a);
  CUTLASS_UNUSED(b);
  CUTLASS_UNUSED(c);
  CUTLASS_UNUSED(d);
  assert(0);
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 168256 - B1 input, S32 accumulation - AND,POPC
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = B1 & B1 + S32 (AND + population count)
template <>
struct Mma<
  gemm::GemmShape<16,8,256>,
  32,
  cutlass::uint1b_t,
  layout::RowMajor,
  cutlass::uint1b_t,
  layout::ColumnMajor,
  int32_t,
  layout::RowMajor,
  OpAndPopc> {
  using Shape = gemm::GemmShape<16,8,256>;
  using ElementA = cutlass::uint1b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::uint1b_t, 128>;  ///< 128 bits packed into four 32-bit registers
  using ElementB = cutlass::uint1b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::uint1b_t, 64>;   ///< 64 bits packed into two 32-bit registers
  using ElementC = int32_t;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int32_t, 4>;
  using Operator = OpAndPopc;
  using ArchTag = arch::Sm80;
  /// Computes D = popc(A & B) + C using the SM80 binary mma.sync.m16n8k256 instruction.
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,        ///< destination accumulator (4x s32)
    FragmentA const &a,  ///< A operand fragment
    FragmentB const &b,  ///< B operand fragment
    FragmentC const &c   ///< source accumulator (4x s32)
  ) const {
#if defined(CUTLASS_ARCH_MMA_B1_AND_SM80_ENABLED)
  // View the packed bit fragments as the 32-bit registers the PTX instruction operates on.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);
  asm volatile(
      "mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.and.popc {%0,%1,%2,%3}, "
      "{%4,%5,%6,%7}, "
      "{%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
  // Instruction unavailable on this architecture: consume operands and trap.
  CUTLASS_UNUSED(a);
  CUTLASS_UNUSED(b);
  CUTLASS_UNUSED(c);
  CUTLASS_UNUSED(d);
  assert(0);
#endif
  }
};
/// Matrix multiply-add operation: S32 = B1 & B1 + S32
template <>
struct Mma<
  gemm::GemmShape<16,8,256>,
  32,
  cutlass::uint1b_t,
  layout::RowMajor,
  cutlass::uint1b_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpMultiplyAdd> {

  /// MMA tile shape: M=16, N=8, K=256 (bits)
  using Shape = gemm::GemmShape<16,8,256>;

  /// Operand A: 1-bit elements, row-major, 128 one-bit elements per fragment
  using ElementA = cutlass::uint1b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::uint1b_t, 128>;

  /// Operand B: 1-bit elements, column-major, 64 one-bit elements per fragment
  using ElementB = cutlass::uint1b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::uint1b_t, 64>;

  /// Accumulator: four signed 32-bit values per fragment
  using ElementC = int32_t;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int32_t, 4>;

  using Operator = OpMultiplyAdd;
  using ArchTag = arch::Sm80;

  /// Computes multiply-add. For binary (b1) operands, OpMultiplyAdd lowers to
  /// the same AND+POPC instruction as OpAndPopc (see the PTX below).
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,
    FragmentA const &a,
    FragmentB const &b,
    FragmentC const &c
  ) const {

#if defined(CUTLASS_ARCH_MMA_B1_AND_SM80_ENABLED)

  // Bind the packed 1-bit fragments as 32-bit registers for the PTX operands.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);

  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);

  asm volatile(
      "mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.and.popc {%0,%1,%2,%3}, "
      "{%4,%5,%6,%7}, "
      "{%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));

#else
    // Unsupported architecture: silence warnings and trap at runtime.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif
  }
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16x8x256 - B1 input, S32 accumulation - XOR,POPC
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = B1 ^ B1 + S32
template <>
struct Mma<
  gemm::GemmShape<16,8,256>,
  32,
  cutlass::uint1b_t,
  layout::RowMajor,
  cutlass::uint1b_t,
  layout::ColumnMajor,
  int,
  layout::RowMajor,
  OpXorPopc> {

  /// MMA tile shape: M=16, N=8, K=256 (bits)
  using Shape = gemm::GemmShape<16,8,256>;

  /// Operand A: 1-bit elements, row-major, 128 one-bit elements per fragment
  using ElementA = cutlass::uint1b_t;
  using LayoutA = layout::RowMajor;
  using FragmentA = Array<cutlass::uint1b_t, 128>;

  /// Operand B: 1-bit elements, column-major, 64 one-bit elements per fragment
  using ElementB = cutlass::uint1b_t;
  using LayoutB = layout::ColumnMajor;
  using FragmentB = Array<cutlass::uint1b_t, 64>;

  /// Accumulator: four signed 32-bit values per fragment
  using ElementC = int;
  using LayoutC = layout::RowMajor;
  using FragmentC = Array<int, 4>;

  using Operator = OpXorPopc;
  using ArchTag = arch::Sm80;

  /// Computes multiply-add
  CUTLASS_HOST_DEVICE
  void operator()(
    FragmentC &d,
    FragmentA const &a,
    FragmentB const &b,
    FragmentC const &c
  ) const {

#if defined(CUTLASS_ARCH_MMA_B1_XOR_SM80_ENABLED)

  // Bind the packed 1-bit fragments as 32-bit registers for the PTX operands.
  uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
  uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);

  int const *C = reinterpret_cast<int const *>(&c);
  int *D = reinterpret_cast<int *>(&d);

  // Binary "multiply" is a bitwise XOR of A and B; the K-dimension reduction
  // is a population count (.xor.popc), accumulated into the s32 accumulator.
  asm volatile(
      "mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.xor.popc {%0,%1,%2,%3}, "
      "{%4,%5,%6,%7}, "
      "{%8,%9}, {%10,%11,%12,%13};\n"
      : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
      : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
        "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));

#else
    // Unsupported architecture: silence warnings and trap at runtime.
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_UNUSED(d);
    assert(0);
#endif // defined(CUTLASS_ARCH_MMA_B1_XOR_SM80_ENABLED)
  }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/arch/mma_sm80.h/0 | {
"file_path": "cutlass/include/cutlass/arch/mma_sm80.h",
"repo_id": "cutlass",
"token_count": 25902
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implementation of a CTA-wide barrier for inter-CTA synchronization.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/barrier.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace detail {
//
// Utilities for abstracting synchronization methods for barriers
//
/// Synchronization policy that fences all threads of the thread block (CTA)
/// via __syncthreads().
struct SyncthreadsSync {
  CUTLASS_DEVICE
  static void sync() {
    __syncthreads();
  }
};
/// Synchronization policy that fences the threads of a warp via __syncwarp().
struct SyncwarpSync {
  CUTLASS_DEVICE
  static void sync() {
    __syncwarp();
  }
};
/// Synchronization policy that fences ThreadCount threads on a hardware
/// named barrier identified by the compile-time constant BarrierId.
template <
  int ThreadCount,
  int BarrierId
>
struct NamedBarrierSync {
  CUTLASS_DEVICE
  static void sync() {
    // BarrierId is cast to the ReservedNamedBarriers enumeration used by
    // cutlass::arch::NamedBarrier to select the hardware barrier slot.
    cutlass::arch::NamedBarrier::sync(ThreadCount, static_cast<arch::ReservedNamedBarriers>(BarrierId));
  }
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Group or CTA-wide semaphore for inter-CTA synchronization.
///
/// Flag counters live in global memory (`lock_ptr` points at an array of
/// int flags); `Sync` supplies the intra-group synchronization primitive
/// (e.g. SyncthreadsSync, SyncwarpSync, NamedBarrierSync) used to fence the
/// participating threads around the single thread that polls or signals.
template <class Sync>
struct GenericBarrier {

public:

  /// Flag type
  using T = int;

  /// Initial flag value
  static const T INIT = 0;


protected:

  /// Load flag, as a strong acquire operation (int specialization)
  CUTLASS_DEVICE
  static int ld_acquire(int *ptr)
  {
    int state = 0;

#if (__CUDA_ARCH__ >= 700)
    /// SM70 and newer use memory consistency qualifiers

    // Acquire pattern using acquire modifier
    asm volatile ("ld.global.acquire.gpu.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr));

#else
    // Pre-SM70 fallback: .cg bypasses L1 so the load observes writes made by
    // other CTAs through L2.
    asm volatile ("ld.cg.global.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr));

#endif // (__CUDA_ARCH__ >= 700)

    return state;
  }


  /// Reduce into flag, with release pattern (int specialization)
  CUTLASS_DEVICE
  static void red_release(int *ptr, int val)
  {
#if (__CUDA_ARCH__ >= 700)
    /// SM70 and newer use memory consistency qualifiers

    // Release pattern using acq_rel fence + relaxed modifier. (The fence also releases data
    // that was weakly-written by other threads prior to the last syncthreads)
    asm volatile ("fence.acq_rel.gpu;\n");
    asm volatile ("red.relaxed.gpu.global.add.s32 [%0], %1;\n" : : "l"(ptr), "r"(val));

#else
    // Pre-SM70 fallback: full device fence, then an atomic add.
    __threadfence();
    atomicAdd(ptr, val);

#endif // (__CUDA_ARCH__ >= 700)
  }


public:

  /// Uses thread[0] to wait for at least the specified count of signals on the given flag counter
  CUTLASS_DEVICE
  static void wait_lt(void *lock_ptr, int thread_idx, int flag_idx, int count)
  {
    T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;

    if (thread_idx == 0)
    {
      // Spin-loop
      #pragma unroll 1
      while(ld_acquire(flag_ptr) < count) {}
    }

    // Release the remaining threads of the group once the flag is observed.
    Sync::sync();
  }

  /// Uses thread[0] to wait for at least the specified count of signals on the given flag counter
  CUTLASS_DEVICE
  static void wait_eq(void *lock_ptr, int thread_idx, int flag_idx, T val = 1)
  {
    T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;

    if (thread_idx == 0)
    {
      // Spin-loop
      #pragma unroll 1
      while(ld_acquire(flag_ptr) != val) {}
    }
    Sync::sync();
  }

  /// Uses thread[0] to wait for the specified count of signals on the given flag counter
  CUTLASS_DEVICE
  static void wait_eq_reset(void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
    T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;

    if (thread_idx == 0)
    {
      // Spin-loop; on success the CAS atomically resets the flag back to 0.
      #pragma unroll 1
      while(atomicCAS(flag_ptr, val, 0) != val) {}
    }

    Sync::sync();
  }

  /// Increment the arrival count for a flag
  CUTLASS_DEVICE
  static void arrive_inc(void *lock_ptr, int thread_idx, int flag_idx, int val = 1)
  {
    T* flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;

    // Fence the whole group before thread[0] publishes the arrival.
    Sync::sync();

    if (thread_idx == 0)
    {
      red_release(flag_ptr, val);
    }
  }


  /// Increment the arrival counts for a range of flags
  CUTLASS_DEVICE
  static void arrive_range_inc(void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1)
  {
    int flag_idx = first_flag_idx + thread_idx;
    T* flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;

    // Barrier to make sure all other threads in group have written their data
    Sync::sync();

    // Select threads increment their flags
    if (thread_idx < count) {
      red_release(flag_ptr, val);
    }
  }
};
using Barrier = GenericBarrier<detail::SyncthreadsSync>;
/////////////////////////////////////////////////////////////////////////////////////////////////
/** Structure for managing multiple NamedBarriers to be used by different warp groups, allowing
* runtime index values to be used to call into named barriers with compile-time-constant IDs.
*
* @param ThreadCount_ Number of threads that will wait on a NamedBarrier with a given ID
* @param Offset Value added to the ID passed in by the user to determine the NamedBarrier ID to call into
* @param MaxNumNamedBarriers The maximum number of unique barrier IDs that will be requested on this type
**/
template <
  uint32_t ThreadCount_,
  uint32_t Offset = 0,
  uint32_t MaxNumNamedBarriers = 16
>
struct NamedBarrierManager {
  static_assert(MaxNumNamedBarriers <= arch::NamedBarrier::HardwareMaxNumNamedBarriers);
  static_assert(MaxNumNamedBarriers + Offset <= arch::NamedBarrier::HardwareMaxNumNamedBarriers, "Barrier IDs cannot exceed 15");

  // Number of threads participating in the barrier
  static constexpr uint32_t ThreadCount = ThreadCount_;

  /// Barrier synchronized on the named barrier with compile-time ID BarrierId
  template <uint32_t BarrierId>
  using BarrierSync = cutlass::GenericBarrier<cutlass::detail::NamedBarrierSync<ThreadCount, BarrierId>>;

  // Underlying type used by all barriers for synchronization. Does not depend on
  // template parameter BarrierId, so passing in 0 suffices.
  using T = typename BarrierSync<0>::T;

  // Compile-time sequence [0, MaxNumNamedBarriers) used to expand the runtime
  // barrier index into calls on compile-time-ID barriers.
  using IntegerSequence = cute::make_integer_sequence<uint32_t, MaxNumNamedBarriers>;

  /// Waits until the flag counter reaches at least `count` on barrier `idx`
  CUTLASS_DEVICE
  static void
  wait_lt(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int count) {
    wait_lt_helper(idx, lock_ptr, thread_idx, flag_idx, count, IntegerSequence{});
  }

  /// Waits until the flag counter equals `val` on barrier `idx`
  CUTLASS_DEVICE
  static void
  wait_eq(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
    wait_eq_helper<false>(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{});
  }

  /// Waits until the flag counter equals `val` on barrier `idx`, then resets it to 0
  CUTLASS_DEVICE
  static void
  wait_eq_reset(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
    wait_eq_helper<true>(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{});
  }

  /// Increments the arrival count on barrier `idx`
  CUTLASS_DEVICE
  static void
  arrive_inc(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int val = 1) {
    arrive_inc_helper(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{});
  }

  /// Increments the arrival counts on a range of flags of barrier `idx`
  CUTLASS_DEVICE
  static void
  arrive_range_inc(uint32_t idx, void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1) {
    arrive_range_inc_helper(idx, lock_ptr, thread_idx, first_flag_idx, count, val, IntegerSequence{});
  }

private:

  /// Debug check that a runtime barrier index lies in [0, MaxNumNamedBarriers)
  CUTLASS_DEVICE
  static void
  check_barrier_in_range([[maybe_unused]] uint32_t idx) {
    // The assertion must hold for valid indices and fire for invalid ones.
    assert((idx < MaxNumNamedBarriers) && "Index exceeds barrier count");
  }

  // Fold expression dispatches the runtime `idx` to the matching
  // compile-time BarrierId (offset by Offset); exactly one term fires.
  template <uint32_t... Idx>
  CUTLASS_DEVICE
  static void
  wait_lt_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int count, cute::integer_sequence<uint32_t, Idx...>) {
    check_barrier_in_range(idx);
    ((Idx == idx && (BarrierSync<Idx + Offset>::wait_lt(lock_ptr, thread_idx, flag_idx, count), true)) || ...);
  }

  template <bool Reset, uint32_t... Idx>
  CUTLASS_DEVICE
  static void
  wait_eq_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val, cute::integer_sequence<uint32_t, Idx...>) {
    check_barrier_in_range(idx);
    if constexpr (Reset) {
      ((Idx == idx && (BarrierSync<Idx + Offset>::wait_eq_reset(lock_ptr, thread_idx, flag_idx, val), true)) || ...);
    }
    else {
      ((Idx == idx && (BarrierSync<Idx + Offset>::wait_eq(lock_ptr, thread_idx, flag_idx, val), true)) || ...);
    }
  }

  template <uint32_t... Idx>
  CUTLASS_DEVICE
  static void
  arrive_inc_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int val, cute::integer_sequence<uint32_t, Idx...>) {
    check_barrier_in_range(idx);
    ((Idx == idx && (BarrierSync<Idx + Offset>::arrive_inc(lock_ptr, thread_idx, flag_idx, val), true)) || ...);
  }

  template <uint32_t... Idx>
  CUTLASS_DEVICE
  static void
  arrive_range_inc_helper(uint32_t idx, void *lock_ptr, int thread_idx, int first_flag_idx, int count, int val, cute::integer_sequence<uint32_t, Idx...>) {
    check_barrier_in_range(idx);
    ((Idx == idx && (BarrierSync<Idx + Offset>::arrive_range_inc(lock_ptr, thread_idx, first_flag_idx, count, val), true)) || ...);
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/** Structure for synchronizing via contiguous barriers (e.g., __syncwarp, __syncthreads)
* via an API that mirrors that of NamedBarrierManager
*
* @param Synchronizer Synchronization helper exposing a `sync()` method to perform synchronization
**/
template <
  class Synchronizer,
  uint32_t ThreadCount_
>
struct SyncManager {

  // Number of threads participating in the barrier
  static constexpr uint32_t ThreadCount = ThreadCount_;

  using BarrierSync = cutlass::GenericBarrier<Synchronizer>;

  // Underlying type used by all barriers for synchronization.
  using T = typename BarrierSync::T;

  // Note: the unnamed leading uint32_t parameter mirrors the barrier-index
  // argument of NamedBarrierManager; it is ignored here because a single
  // barrier underlies all operations.

  CUTLASS_DEVICE
  static
  void wait_lt(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, int count) {
    BarrierSync::wait_lt(lock_ptr, thread_idx, flag_idx, count);
  }

  CUTLASS_DEVICE
  static void
  wait_eq(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
    BarrierSync::wait_eq(lock_ptr, thread_idx, flag_idx, val);
  }

  CUTLASS_DEVICE
  static void
  wait_eq_reset(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
    BarrierSync::wait_eq_reset(lock_ptr, thread_idx, flag_idx, val);
  }

  CUTLASS_DEVICE
  static void
  arrive_inc(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, int val = 1) {
    BarrierSync::arrive_inc(lock_ptr, thread_idx, flag_idx, val);
  }

  CUTLASS_DEVICE
  static void
  arrive_range_inc(uint32_t, void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1) {
    BarrierSync::arrive_range_inc(lock_ptr, thread_idx, first_flag_idx, count, val);
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/barrier.h/0 | {
"file_path": "cutlass/include/cutlass/barrier.h",
"repo_id": "cutlass",
"token_count": 4279
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains definitions and utility functions for describing convolution problem shapes.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/conv/convolution.h"
#include "cute/container/array.hpp"
#if ! defined(__CUDACC_RTC__)
#include <initializer_list>
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
// Implements the user facing argument for all CUTLASS 3.x convolutions in a rank agnostic fashion.
// All tensors are flat and by default treated as layout right (NDHWC, KTRSC, NZPQK)
// Supports asymmetric padding, traversal strides, dilations, and all conv algorithm types.
template <
conv::Operator ConvOp_,
int NumSpatialDimensions
>
struct ConvProblemShape {
//
// Alias types for members
//
static constexpr int RankS = NumSpatialDimensions;
static constexpr int RankT = NumSpatialDimensions + 2;
static constexpr conv::Operator ConvOp = ConvOp_;
using SpatialExtent = cute::array<int, RankS>;
using TensorExtent = cute::array<int, RankT>;
using TensorStride = cute::array<int64_t, RankT>;
using ShapePadding = SpatialExtent;
using TraversalStride = SpatialExtent;
using ShapeDilation = SpatialExtent;
using Corner = SpatialExtent;
//
// Members
//
cutlass::conv::Mode mode{};
TensorExtent shape_A{};
TensorStride stride_A{};
TensorExtent shape_B{};
TensorStride stride_B{};
TensorExtent shape_C{};
TensorStride stride_C{};
// asymmetric padding, both upper and lower padding must be >= 0
ShapePadding lower_padding{};
ShapePadding upper_padding{};
TraversalStride traversal_stride{};
ShapeDilation dilation{};
int groups = 1;
//
// Methods
//
/// Default-constructed problem: all extents/strides are zero-initialized,
/// mode is value-initialized, and groups defaults to 1.
ConvProblemShape() = default;
// Constructor accepting user-facing arguments; derives the transformed
// activation (output) shape/stride and populates the A/B/C tensor views.
ConvProblemShape(
    conv::Mode mode,               // convolution/cross-correlation
    TensorExtent shape_act,        // [n,d,h,w,c]
    TensorStride stride_act,       // [n,d,h,w,c]
    TensorExtent shape_flt,        // [k,t,r,s,c]
    TensorStride stride_flt,       // [k,t,r,s,c]
    ShapePadding lower_padding,    // [pad_d, pad_h, pad_w]
    ShapePadding upper_padding,    // [pad_d, pad_h, pad_w]
    TraversalStride tstride,       // [stride_d, stride_h, stride_w]
    ShapeDilation dilation,        // [dilation_d, dilation_h, dilation_w]
    int groups)
: mode(mode)
, lower_padding(lower_padding)
, upper_padding(upper_padding)
, traversal_stride(tstride)
, dilation(dilation)
, groups(groups) {
  // Compute the [n,z,p,q,k] extents/strides of the output, then assign
  // A/B/C according to ConvOp (see the table on set_shape_stride_ABC).
  auto [shape_xformed_act, stride_xformed_act] = calculate_xformed_act(shape_act, shape_flt);
  set_shape_stride_ABC(shape_act, stride_act, shape_flt, stride_flt, shape_xformed_act, stride_xformed_act);
}
// Allow user input of xformed activation stride to support non-packed strides.
ConvProblemShape(
    conv::Mode mode,                     // convolution/cross-correlation
    TensorExtent shape_act,              // [n,d,h,w,c]
    TensorStride stride_act,             // [n,d,h,w,c]
    TensorExtent shape_flt,              // [k,t,r,s,c]
    TensorStride stride_flt,             // [k,t,r,s,c]
    TensorStride stride_xformed_act,     // [n,z,p,q,k]
    ShapePadding lower_padding,          // [pad_d, pad_h, pad_w]
    ShapePadding upper_padding,          // [pad_d, pad_h, pad_w]
    TraversalStride tstride,             // [stride_d, stride_h, stride_w]
    ShapeDilation dilation,              // [dilation_d, dilation_h, dilation_w]
    int groups)
: mode(mode)
, lower_padding(lower_padding)
, upper_padding(upper_padding)
, traversal_stride(tstride)
, dilation(dilation)
, groups(groups) {
  // The innermost (channel-like) mode must remain contiguous.
  CUTLASS_ASSERT(stride_act[RankT - 1] == 1);
  CUTLASS_ASSERT(stride_flt[RankT - 1] == 1);
  CUTLASS_ASSERT(stride_xformed_act[RankT - 1] == 1);

  // Non-packed strides are allowed, but each user stride must be at least
  // as large as the packed (fully compact) stride for the same extents.
  auto stride_act_packed = packed_stride_right_major(shape_act);
  auto stride_flt_packed = packed_stride_right_major(shape_flt);
  auto [shape_xformed_act, stride_xformed_act_packed] = calculate_xformed_act(shape_act, shape_flt);

  CUTLASS_PRAGMA_UNROLL
  for(int i = 0; i < RankT - 1; ++i) {
    CUTLASS_ASSERT(stride_act[i] >= stride_act_packed[i]);
    CUTLASS_ASSERT(stride_flt[i] >= stride_flt_packed[i]);
    CUTLASS_ASSERT(stride_xformed_act[i] >= stride_xformed_act_packed[i]);
  }

  set_shape_stride_ABC(shape_act, stride_act, shape_flt, stride_flt, shape_xformed_act, stride_xformed_act);
}
// Constructor accepting user-facing arguments; presumes packed tensor
// strides in canonical (CWHDN) order and delegates to the stride-taking
// constructor above.
ConvProblemShape(
    conv::Mode mode,
    TensorExtent shape_act,
    TensorExtent shape_flt,
    ShapePadding lower_padding,
    ShapePadding upper_padding,
    TraversalStride tstride,
    ShapeDilation dilation,
    int groups)
: ConvProblemShape(
    mode,
    shape_act,
    packed_stride_right_major(shape_act),
    shape_flt,
    packed_stride_right_major(shape_flt),
    lower_padding,
    upper_padding,
    tstride,
    dilation,
    groups) {
}
#if ! defined(__CUDACC_RTC__)
// Host-only convenience constructor taking initializer lists; each list's
// length is checked against the corresponding fixed-size array before the
// copy, then the array-based computation proceeds as in the other ctors.
ConvProblemShape(
    conv::Mode mode,
    std::initializer_list<int> shape_act_,
    std::initializer_list<int64_t> stride_act_,
    std::initializer_list<int> shape_flt_,
    std::initializer_list<int64_t> stride_flt_,
    std::initializer_list<int> lower_padding_,
    std::initializer_list<int> upper_padding_,
    std::initializer_list<int> traversal_stride_,
    std::initializer_list<int> dilation_,
    int groups)
: mode(mode)
, groups(groups) {
  TensorExtent shape_act{};
  TensorStride stride_act{};
  TensorExtent shape_flt{};
  TensorStride stride_flt{};

  // Guard the std::copy calls below: a mismatched list length would
  // over/under-fill the fixed-size arrays.
  assert(shape_act_.size() == shape_act.size());
  assert(stride_act_.size() == stride_act.size());
  assert(shape_flt_.size() == shape_flt.size());
  assert(stride_flt_.size() == stride_flt.size());
  assert(lower_padding_.size() == lower_padding.size());
  assert(upper_padding_.size() == upper_padding.size());
  assert(traversal_stride_.size() == traversal_stride.size());
  assert(dilation_.size() == dilation.size());

  std::copy(shape_act_.begin(), shape_act_.end(), shape_act.begin());
  std::copy(stride_act_.begin(), stride_act_.end(), stride_act.begin());
  std::copy(shape_flt_.begin(), shape_flt_.end(), shape_flt.begin());
  std::copy(stride_flt_.begin(), stride_flt_.end(), stride_flt.begin());
  std::copy(lower_padding_.begin(), lower_padding_.end(), lower_padding.begin());
  std::copy(upper_padding_.begin(), upper_padding_.end(), upper_padding.begin());
  std::copy(traversal_stride_.begin(), traversal_stride_.end(), traversal_stride.begin());
  std::copy(dilation_.begin(), dilation_.end(), dilation.begin());

  auto [shape_xformed_act, stride_xformed_act] = calculate_xformed_act(shape_act, shape_flt);
  set_shape_stride_ABC(shape_act, stride_act, shape_flt, stride_flt, shape_xformed_act, stride_xformed_act);
}
// Allow user input of xformed activation stride to support non-packed strides.
// Host-only convenience constructor taking initializer lists.
ConvProblemShape(
    conv::Mode mode,
    std::initializer_list<int> shape_act_,
    std::initializer_list<int64_t> stride_act_,
    std::initializer_list<int> shape_flt_,
    std::initializer_list<int64_t> stride_flt_,
    std::initializer_list<int64_t> stride_xformed_act_,
    std::initializer_list<int> lower_padding_,
    std::initializer_list<int> upper_padding_,
    std::initializer_list<int> traversal_stride_,
    std::initializer_list<int> dilation_,
    int groups)
: mode(mode)
, groups(groups) {
  TensorExtent shape_act{};
  TensorStride stride_act{};
  TensorExtent shape_flt{};
  TensorStride stride_flt{};
  TensorStride stride_xformed_act{};

  // Guard the std::copy calls below against mismatched list lengths, which
  // would over/under-fill the fixed-size arrays (mirrors the checks in the
  // sibling initializer_list constructor).
  assert(shape_act_.size() == shape_act.size());
  assert(stride_act_.size() == stride_act.size());
  assert(shape_flt_.size() == shape_flt.size());
  assert(stride_flt_.size() == stride_flt.size());
  assert(stride_xformed_act_.size() == stride_xformed_act.size());
  assert(lower_padding_.size() == lower_padding.size());
  assert(upper_padding_.size() == upper_padding.size());
  assert(traversal_stride_.size() == traversal_stride.size());
  assert(dilation_.size() == dilation.size());

  std::copy(shape_act_.begin(), shape_act_.end(), shape_act.begin());
  std::copy(stride_act_.begin(), stride_act_.end(), stride_act.begin());
  std::copy(shape_flt_.begin(), shape_flt_.end(), shape_flt.begin());
  std::copy(stride_flt_.begin(), stride_flt_.end(), stride_flt.begin());
  std::copy(stride_xformed_act_.begin(), stride_xformed_act_.end(), stride_xformed_act.begin());
  std::copy(lower_padding_.begin(), lower_padding_.end(), lower_padding.begin());
  std::copy(upper_padding_.begin(), upper_padding_.end(), upper_padding.begin());
  std::copy(traversal_stride_.begin(), traversal_stride_.end(), traversal_stride.begin());
  std::copy(dilation_.begin(), dilation_.end(), dilation.begin());

  // The innermost (channel-like) mode must remain contiguous.
  CUTLASS_ASSERT(stride_act[RankT - 1] == 1);
  CUTLASS_ASSERT(stride_flt[RankT - 1] == 1);
  CUTLASS_ASSERT(stride_xformed_act[RankT - 1] == 1);

  // Non-packed strides are allowed, but each user stride must be at least
  // as large as the packed (fully compact) stride for the same extents.
  auto stride_act_packed = packed_stride_right_major(shape_act);
  auto stride_flt_packed = packed_stride_right_major(shape_flt);
  auto [shape_xformed_act, stride_xformed_act_packed] = calculate_xformed_act(shape_act, shape_flt);

  CUTLASS_PRAGMA_UNROLL
  for(int i = 0; i < RankT - 1; ++i) {
    CUTLASS_ASSERT(stride_act[i] >= stride_act_packed[i]);
    CUTLASS_ASSERT(stride_flt[i] >= stride_flt_packed[i]);
    CUTLASS_ASSERT(stride_xformed_act[i] >= stride_xformed_act_packed[i]);
  }

  set_shape_stride_ABC(shape_act, stride_act, shape_flt, stride_flt, shape_xformed_act, stride_xformed_act);
}
// Host-only convenience constructor taking initializer lists; strides are
// derived as packed, layout-right strides from the supplied extents.
ConvProblemShape(
    conv::Mode mode,
    std::initializer_list<int> shape_act_,
    std::initializer_list<int> shape_flt_,
    std::initializer_list<int> lower_padding_,
    std::initializer_list<int> upper_padding_,
    std::initializer_list<int> traversal_stride_,
    std::initializer_list<int> dilation_,
    int groups)
: mode(mode)
, groups(groups) {
  TensorExtent shape_act{};
  TensorStride stride_act{};
  TensorExtent shape_flt{};
  TensorStride stride_flt{};

  // Guard the std::copy calls below against mismatched list lengths.
  assert(shape_act_.size() == shape_act.size());
  assert(shape_flt_.size() == shape_flt.size());
  assert(lower_padding_.size() == lower_padding.size());
  assert(upper_padding_.size() == upper_padding.size());
  assert(traversal_stride_.size() == traversal_stride.size());
  assert(dilation_.size() == dilation.size());

  std::copy(shape_act_.begin(), shape_act_.end(), shape_act.begin());
  std::copy(shape_flt_.begin(), shape_flt_.end(), shape_flt.begin());
  std::copy(lower_padding_.begin(), lower_padding_.end(), lower_padding.begin());
  std::copy(upper_padding_.begin(), upper_padding_.end(), upper_padding.begin());
  std::copy(traversal_stride_.begin(), traversal_stride_.end(), traversal_stride.begin());
  std::copy(dilation_.begin(), dilation_.end(), dilation.begin());

  // Derive fully compact strides for both tensors.
  stride_act = packed_stride_right_major(shape_act);
  stride_flt = packed_stride_right_major(shape_flt);

  auto [shape_xformed_act, stride_xformed_act] = calculate_xformed_act(shape_act, shape_flt);
  set_shape_stride_ABC(shape_act, stride_act, shape_flt, stride_flt, shape_xformed_act, stride_xformed_act);
}
#endif // not defined(__CUDACC_RTC__)
// Set shape and stride of tensor A/B/C according to following table:
// | | Fprop | Dgrad | Wgrad |
// | ------ | ------ | ------ | ------|
// | ShapeA | NDHWC | NZPQK | NZPQK |
// | ShapeB | KTRSC | KTRSC | NDHWC |
// | ShapeC | NZPQK | NDHWC | KTRSC |
//
CUTLASS_HOST_DEVICE
constexpr void
set_shape_stride_ABC(
    TensorExtent shape_act,
    TensorStride stride_act,
    TensorExtent shape_flt,
    TensorStride stride_flt,
    TensorExtent shape_xformed_act,
    TensorStride stride_xformed_act) {

  if constexpr (ConvOp == cutlass::conv::Operator::kFprop) {
    // Fprop: A = activation, B = filter, C = output (xformed activation)
    shape_A = shape_act;
    stride_A = stride_act;
    shape_B = shape_flt;
    stride_B = stride_flt;
    shape_C = shape_xformed_act;
    stride_C = stride_xformed_act;
  }
  else if constexpr (ConvOp == cutlass::conv::Operator::kDgrad) {
    // Dgrad: A = xformed activation (output grad), B = filter, C = activation
    shape_A = shape_xformed_act;
    stride_A = stride_xformed_act;
    shape_B = shape_flt;
    stride_B = stride_flt;
    shape_C = shape_act;
    stride_C = stride_act;
  }
  else if constexpr (ConvOp == cutlass::conv::Operator::kWgrad) {
    // Wgrad: A = xformed activation (output grad), B = activation, C = filter
    shape_A = shape_xformed_act;
    stride_A = stride_xformed_act;
    shape_B = shape_act;
    stride_B = stride_act;
    shape_C = shape_flt;
    stride_C = stride_flt;
  }
}
// Get problem shape MNK according to following table:
// | | Fprop | Dgrad | Wgrad |
// | ---- | --------- | -------- | -------- |
// | Shape_M | (Q,P,Z,N) | (W,H,D,N) | (K) |
// | Shape_N | (K) | (C) | (C,S,R,T) |
// | Shape_K | (C,S,R,T) | (K,S,R,T) | (Q,P,Z,N) |
CUTLASS_HOST_DEVICE
constexpr auto
get_transformed_problem_shape_MNK() const {
  using cute::insert;
  using cute::make_shape;
  using cute::reverse;
  using cute::take;

  // The extent arrays are stored layout-right (e.g. [n,z,p,q,k]); `reverse`
  // converts each slice into the innermost-first mode order of cute shapes.
  if constexpr (ConvOp == conv::Operator::kWgrad) {
    auto M_xformed = shape_C[0];                          // (K)
    auto N_xformed = reverse(take<1, RankT>(shape_C));    // (C,S,R,T)
    auto K_xformed = reverse(take<0, RankT - 1>(shape_A)); // (Q,P,Z,N)

    return make_shape(M_xformed, N_xformed, K_xformed);
  }
  else if constexpr (ConvOp == conv::Operator::kFprop){
    auto M_xformed = reverse(take<0, RankT - 1>(shape_C)); // (Q,P,Z,N)
    auto N_xformed = shape_C[RankT - 1];                   // (K)
    auto K_xformed = reverse(take<1, RankT>(shape_B));     // (C,S,R,T)

    return make_shape(M_xformed, N_xformed, K_xformed);
  }
  else if constexpr (ConvOp == conv::Operator::kDgrad) {
    auto M_xformed = reverse(take<0,RankT - 1>(shape_C));  // (W,H,D,N)
    auto N_xformed = shape_C[RankT - 1];                   // (C)
    // shape_B: [K,T,R,S,C], K_xformed: [K,S,R,T]
    auto K_xformed = insert<0>(
        (reverse(take<1,RankT - 1>(shape_B))),
        shape_B[0]);

    return make_shape(M_xformed, N_xformed, K_xformed);
  }
}
// Get A extents.
// fprop: A extents array contains [N,D,H,W,C]. Turn that into ((W,H,D,N), (C))
// wgrad: A extents array contains [N,Z,P,Q,K]. Turn that into ((K), (Q,P,Z,N))
// dgrad: A extents array contains [N,Z,P,Q,K]. Turn that into ((Q,P,Z,N), (K))
CUTLASS_HOST_DEVICE
constexpr auto
get_shape_A() const {
  using cute::make_shape;
  using cute::take;

  if constexpr (ConvOp == conv::Operator::kFprop ||
                ConvOp == conv::Operator::kDgrad) {
    // Split off the innermost (channel-like) mode; reverse the rest into
    // cute's innermost-first mode order.
    return make_shape(
        cute::reverse(take<0, RankT - 1>(shape_A)),
        shape_A[RankT - 1]);
  }
  // For wgrad kernel, we need to linearize NZPQ for tensor A
  else if constexpr (ConvOp == conv::Operator::kWgrad) {
    return make_shape(
        shape_A[RankT - 1],
        cute::product(take<0, RankT - 1>(shape_A)));
  }
}
// Get B extents.
// fprop: B extents array contains [K,T,R,S,C]. Turn that into ((K), (C,S,R,T))
// wgrad: B extents array contains [N,D,H,W,C]. Turn that into ((C), (W,H,D,N))
// dgrad: B extents array contains [K,T,R,S,C]. Turn that into ((C), (K,S,R,T))
CUTLASS_HOST_DEVICE
constexpr auto
get_shape_B() const {
  using cute::make_shape;
  using cute::reverse;
  using cute::take;

  if constexpr (ConvOp == conv::Operator::kFprop) {
    // [K,T,R,S,C] -> ((K), (C,S,R,T))
    return make_shape(
        shape_B[0],
        reverse(take<1, RankT>(shape_B)));
  }
  else if constexpr (ConvOp == conv::Operator::kWgrad) {
    // [N,D,H,W,C] -> ((C), (W,H,D,N))
    return make_shape(
        shape_B[RankT - 1],
        reverse(take<0, RankT - 1>(shape_B)));
  }
  else if constexpr (ConvOp == conv::Operator::kDgrad) {
    // shape_B: [K,T,R,S,C], return: [(C),(K,S,R,T)]
    return make_shape(
        shape_B[RankT - 1],
        cute::insert<0>(
            reverse(take<1, RankT - 1>(shape_B)),
            shape_B[0]));
  }
}
// Static method that returns the canonical strides of tensors (layouts are right major and compact).
// The innermost (last) mode has stride 1; iterating with a reverse sequence
// (make_rseq) computes each outer stride as extent[i+1] * stride[i+1], i.e. a
// packed right-major (row-major-style) layout.
CUTLASS_HOST_DEVICE
static constexpr TensorStride
packed_stride_right_major(TensorExtent const& extents) {
  TensorStride strides{};
  strides[RankT-1] = 1;
  cute::for_each(cute::make_rseq<RankT-1>{}, [&](auto i) {
    strides[i] = extents[i+1] * strides[i+1];
  });
  return strides;
}
// Static method that returns the packed logical size of any TensorExtent,
// i.e. the product of all RankT extents.
CUTLASS_HOST_DEVICE
static constexpr size_t
size(TensorExtent const& extents) {
  // Named `num_elements` so the local does not shadow the enclosing
  // function name `size`.
  size_t num_elements = 1;
  cute::for_each(cute::make_seq<RankT>{}, [&](auto i) {
    num_elements *= extents[i];
  });
  return num_elements;
}
// Number of elements spanned by tensor A.
// NOTE(review): shape[0] * stride[0] equals the full logical size only when the
// strides are packed right-major (stride[0] = product of the remaining extents),
// as produced by packed_stride_right_major() -- confirm for user-supplied strides.
CUTLASS_HOST_DEVICE
constexpr size_t
size_A() const {
  return shape_A[0] * stride_A[0];
}
// Number of elements spanned by tensor B.
// NOTE(review): assumes packed right-major strides (see size_A) -- confirm for
// user-supplied strides.
CUTLASS_HOST_DEVICE
constexpr size_t
size_B() const {
  return shape_B[0] * stride_B[0];
}
// Number of elements spanned by tensor C.
// NOTE(review): assumes packed right-major strides (see size_A) -- confirm for
// user-supplied strides.
CUTLASS_HOST_DEVICE
constexpr size_t
size_C() const {
  return shape_C[0] * stride_C[0];
}
// Equality operator.
// Compares the A/B tensor extents and all per-spatial-dimension convolution
// parameters. shape_C and the stride arrays are not compared here --
// presumably because they are derivable from shape_A/shape_B plus the
// parameters below; verify against how problem shapes are constructed.
CUTLASS_HOST_DEVICE
bool operator==(ConvProblemShape<ConvOp, NumSpatialDimensions> const& rhs) const {
  using cute::for_each;
  using cute::make_seq;
  bool is_equal = true;
  // Compare all tensor extents
  for_each(make_seq<RankT>{}, [&](auto i) {
    is_equal = is_equal
      && (shape_A[i] == rhs.shape_A[i])
      && (shape_B[i] == rhs.shape_B[i]);
  });
  // Compare all spatial extents
  for_each(make_seq<RankS>{}, [&](auto i) {
    is_equal = is_equal
      && (lower_padding[i] == rhs.lower_padding[i])
      && (upper_padding[i] == rhs.upper_padding[i])
      && (traversal_stride[i] == rhs.traversal_stride[i])
      && (dilation[i] == rhs.dilation[i]);
  });
  return is_equal;
}
/// Inequality operator, defined as the logical negation of equality.
CUTLASS_HOST_DEVICE
bool operator!=(ConvProblemShape<ConvOp, NumSpatialDimensions> const& rhs) const {
  bool const equal = (*this == rhs);
  return !equal;
}
private:
// Computes the shape and packed strides of the "transformed activation" tensor
// [N, Z, P, Q, ..., K]: batch extent, one output spatial extent per dimension,
// and the filter K extent.
CUTLASS_HOST_DEVICE
constexpr auto
calculate_xformed_act(TensorExtent shape_act, TensorExtent shape_flt) {
  TensorExtent shape_xformed_act{};
  // calculate n,z,p,q,k.
  // a helper lambda to compute a single spatial extent of the nzpqk tensor
  // using the standard convolution output-size formula:
  //   out = (in + pad_total - dilated_filter_extent) / stride + 1,
  // where dilated_filter_extent = (filter_ext - 1) * dilation + 1.
  auto nzpqk_extent = [](int act_ext, int filter_ext, int pad_total, int dilation, int tstride) {
    return 1 + (act_ext + pad_total - ((filter_ext -1) * dilation + 1)) / tstride;
  };
  shape_xformed_act[0] = shape_act[0]; // Activation N extent
  cute::for_each(cute::make_seq<RankS>{}, [&](auto i) {
    shape_xformed_act[i+1] = nzpqk_extent(
      shape_act[i+1], shape_flt[i+1], upper_padding[i] + lower_padding[i], dilation[i], traversal_stride[i]);
  });
  shape_xformed_act[RankT-1] = shape_flt[0]; // Filter K extent
  // Strides are packed right-major over the computed extents.
  TensorStride stride_xformed_act = packed_stride_right_major(shape_xformed_act);
  return cute::make_tuple(shape_xformed_act, stride_xformed_act);
}
};
/// Pretty-prints a ConvProblemShape: operator kind, each operand's
/// shape:stride pair, and the per-dimension convolution parameters.
template<
  conv::Operator ConvOp,
  int SpatialDim
>
void print(ConvProblemShape<ConvOp, SpatialDim> const& problem) {
  // Emits one "\tTensorX: <shape>:<stride>\n" line per operand.
  auto emit_operand = [](char const* tag, auto const& shape, auto const& stride) {
    printf("\tTensor%s: ", tag);
    cute::print(shape); printf(":");
    cute::print(stride); printf("\n");
  };
  printf("ConvProblemShape with %d spatial dimensions implementing cutlass::conv::Operator::%d\n",
         SpatialDim, int(ConvOp));
  emit_operand("A", problem.shape_A, problem.stride_A);
  emit_operand("B", problem.shape_B, problem.stride_B);
  emit_operand("C", problem.shape_C, problem.stride_C);
  printf("\tLower padding: "); print(problem.lower_padding); printf("\n");
  printf("\tUpper padding: "); print(problem.upper_padding); printf("\n");
  printf("\tTraversal strides: "); print(problem.traversal_stride); printf("\n");
  printf("\tDilation: "); print(problem.dilation); printf("\n");
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::conv
////////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/conv/convnd_problem_shape.hpp/0 | {
"file_path": "cutlass/include/cutlass/conv/convnd_problem_shape.hpp",
"repo_id": "cutlass",
"token_count": 9788
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad.
/// Primary template, declaration only: the partial specializations below select an
/// implementation based on IteratorAlgorithm (kAnalytic vs kOptimized), stage count
/// (2-stage pipelined vs multistage), and operator class (TensorOp vs Simt).
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename OperatorClass,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
  conv::StrideSupport StrideSupport = StrideSupport::kStrided,
  /// Access granularity of A matrix in units of elements
  int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
  /// Access granularity of B matrix in units of elements
  int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultConv2dWgrad;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm
/// and a multistage (Stages-deep, cp.async-style) mainloop.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename OperatorClass,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dWgrad <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  OperatorClass,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM.
  // Wgrad's implicit GEMM treats operand A as column-major and operand B as
  // row-major, accumulating into a row-major layout.
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor,
      ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass,
      Stages, MathOperatorTag>;

  // Define iterators over tiles from the A operand (the output gradient tensor).
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA,
      ThreadMapA,
      AccessTypeA
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (the activation tensor).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB,
      ThreadMapB,
      AccessTypeB
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma. Both operands use the Always cache policy for global loads.
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    arch::CacheOperation::Always,
    MmaPolicy,
    Stages
  >;

  // Number of warp-level K partitions within the threadblock tile.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;

  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kWgrad
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm
/// and a two-stage (double-buffered) pipelined mainloop.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename OperatorClass,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dWgrad <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  OperatorClass,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor,
      ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass,
      2, MathOperatorTag>;

  // Define iterators over tiles from the A operand (output gradient). The access
  // iterator is wrapped in a TileIterator adapter for the two-stage pipeline.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA,
        ThreadMapA,
        AccessTypeA
      >
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (activations).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB,
        ThreadMapB,
        AccessTypeB
      >
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;

  // Number of warp-level K partitions within the threadblock tile.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  // Define the epilogue. Dispatched through DefaultConvEpilogue on ArchTag
  // (unlike the multistage specializations, which use DefaultEpilogueTensorOp directly).
  using Epilogue = typename detail::DefaultConvEpilogue<
    ArchTag,
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp
  >::Epilogue;

  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kWgrad
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm
/// and a multistage mainloop. Identical structure to the Analytic/multistage
/// specialization except for the *Optimized tile access iterators.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename OperatorClass,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dWgrad <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  OperatorClass,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor,
      ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass,
      Stages, MathOperatorTag>;

  // Define iterators over tiles from the A operand (output gradient).
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA,
      ThreadMapA,
      AccessTypeA
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (activations).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB,
      ThreadMapB,
      AccessTypeB
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    arch::CacheOperation::Always,
    MmaPolicy,
    Stages
  >;

  // Number of warp-level K partitions within the threadblock tile.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;

  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kWgrad
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm
/// and a two-stage (double-buffered) pipelined mainloop.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename OperatorClass,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AlignmentA,
  int AlignmentB
>
struct DefaultConv2dWgrad <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  OperatorClass,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AlignmentA,
  AlignmentB
> {
  // Define the core components from GEMM
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor,
      ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass,
      2, MathOperatorTag>;

  // Define iterators over tiles from the A operand (output gradient), wrapped in
  // the TileIterator adapter required by the two-stage pipeline.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA,
        ThreadMapA,
        AccessTypeA
      >
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (activations).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB,
        ThreadMapB,
        AccessTypeB
      >
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;

  // Number of warp-level K partitions within the threadblock tile.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  // Define the epilogue, dispatched through DefaultConvEpilogue on ArchTag.
  using Epilogue = typename detail::DefaultConvEpilogue<
    ArchTag,
    ThreadblockShape,
    WarpMmaTensorOp,
    kPartitionsK,
    EpilogueOutputOp
  >::Epilogue;

  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kWgrad
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80.
/// NOTE(review): the int parameters are named AccessTypeA/AccessTypeB here but
/// occupy the AlignmentA/AlignmentB slots of the primary template -- they are
/// alignment values, not access types; confirm naming intent.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AccessTypeA,
  int AccessTypeB
>
struct DefaultConv2dWgrad <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AccessTypeA,
  AccessTypeB
> {
  // Define the core components from GEMM
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor,
      ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
      Stages, MathOperatorTag>;

  // Define iterators over tiles from the A operand (output gradient). The Simt
  // path uses the iterator's default access type (no AccessType argument).
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA,
      ThreadMapA
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (activations).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB,
      ThreadMapB
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    arch::CacheOperation::Always,
    MmaPolicy,
    Stages
  >;

  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    WarpMmaSimtOp,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;

  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kWgrad
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80.
/// Identical to the Simt/Analytic/multistage specialization except for the
/// *Optimized tile access iterators.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  int Stages,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AccessTypeA,
  int AccessTypeB
>
struct DefaultConv2dWgrad <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  Stages,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AccessTypeA,
  AccessTypeB
> {
  // Define the core components from GEMM
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor,
      ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
      Stages, MathOperatorTag>;

  // Define iterators over tiles from the A operand (output gradient).
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
      ElementA,
      ThreadMapA
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (activations).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized<
      cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
      ElementB,
      ThreadMapB
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma
  using Mma = threadblock::ImplicitGemmMultistage<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    arch::CacheOperation::Always,
    IteratorB,
    SmemIteratorB,
    arch::CacheOperation::Always,
    MmaPolicy,
    Stages
  >;

  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    WarpMmaSimtOp,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;

  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kWgrad
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AccessTypeA,
  int AccessTypeB
>
struct DefaultConv2dWgrad <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kAnalytic,
  StrideSupport,
  AccessTypeA,
  AccessTypeB
> {
  // Define the core components from GEMM
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor,
      ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
      2, MathOperatorTag>;

  // Define iterators over tiles from the A operand (output gradient), wrapped in
  // the TileIterator adapter required by the two-stage pipeline.
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA,
        ThreadMapA
      >
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (activations).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB,
        ThreadMapB
      >
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;

  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    WarpMmaSimtOp,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;

  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kWgrad
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50.
/// Identical to the Simt/Analytic/2-stage specialization except for the
/// *Optimized tile access iterators.
template <
  typename ElementA,
  typename LayoutA,
  typename ElementB,
  typename LayoutB,
  typename ElementC,
  typename LayoutC,
  typename ElementAccumulator,
  typename ArchTag,
  typename ThreadblockShape,
  typename WarpShape,
  typename InstructionShape,
  typename EpilogueOutputOp,
  typename ThreadblockSwizzle,
  typename MathOperatorTag,
  conv::StrideSupport StrideSupport,
  int AccessTypeA,
  int AccessTypeB
>
struct DefaultConv2dWgrad <
  ElementA,
  LayoutA,
  ElementB,
  LayoutB,
  ElementC,
  LayoutC,
  ElementAccumulator,
  arch::OpClassSimt,
  ArchTag,
  ThreadblockShape,
  WarpShape,
  InstructionShape,
  EpilogueOutputOp,
  ThreadblockSwizzle,
  2,
  MathOperatorTag,
  IteratorAlgorithm::kOptimized,
  StrideSupport,
  AccessTypeA,
  AccessTypeB
> {
  // Define the core components from GEMM
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor,
      ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
      2, MathOperatorTag>;

  // Define iterators over tiles from the A operand (output gradient).
  using ThreadMapA = typename MmaCore::IteratorThreadMapA;
  using IteratorA =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
        ElementA,
        ThreadMapA
      >
    >;

  using SmemIteratorA = typename MmaCore::SmemIteratorA;

  // Define iterators over tiles from the B operand (activations).
  using ThreadMapB = typename MmaCore::IteratorThreadMapB;
  using IteratorB =
    cutlass::conv::threadblock::TileIterator<
      cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized<
        cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
        ElementB,
        ThreadMapB
      >
    >;

  using SmemIteratorB = typename MmaCore::SmemIteratorB;

  // Warp-level GEMM components
  using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
  using MmaPolicy = typename MmaCore::MmaPolicy;

  // Define the Mma
  using Mma = threadblock::ImplicitGemmPipelined<
    ThreadblockShape,
    IteratorA,
    SmemIteratorA,
    IteratorB,
    SmemIteratorB,
    ElementC,
    LayoutC,
    MmaPolicy
  >;

  // Define the epilogue
  using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    WarpMmaSimtOp,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;

  // Define the kernel
  using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
    Mma,
    Epilogue,
    ThreadblockSwizzle,
    conv::Operator::kWgrad
  >;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/conv/kernel/default_conv2d_wgrad.h/0 | {
"file_path": "cutlass/include/cutlass/conv/kernel/default_conv2d_wgrad.h",
"repo_id": "cutlass",
"token_count": 9679
} | 21 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Convolution kernel with an epilogue that computes the absolute maximum value of the output
and a pre-activation-function auxiliary output. The auxiliary output is also (optionally)
stored to global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/semaphore.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
  typename Mma_,                                 ///! Threadblock-scoped matrix multiply-accumulate
  typename Epilogue_,                            ///! Epilogue
  typename ThreadblockSwizzle_,                  ///! Threadblock swizzling function
  conv::Operator ConvOperator,                   ///! Convolutional operator (Fprop, Dgrad, Wgrad)
  typename ConvProblemSize_ = Conv2dProblemSize  ///! Convolutional operator on 2D or 3D problem
>
struct ImplicitGemmConvolutionWithAbsMax {

  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using EpilogueOutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  static Operator const kConvolutionalOperator = ConvOperator;

  using ElementA = typename Mma::IteratorA::Element;
  using LayoutA = typename Mma::IteratorA::Layout;
  using ElementB = typename Mma::IteratorB::Element;
  using LayoutB = typename Mma::IteratorB::Layout;
  using ElementC = typename EpilogueOutputOp::ElementOutput;

  /// Set output tensor C layout
  using LayoutC = LayoutA;

  using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator;
  using ElementCompute = typename EpilogueOutputOp::ElementCompute;

  using WarpMmaOperator = typename Mma::Policy::Operator;

  using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
  using MathOperator = typename ArchMmaOperator::Operator;

  using OperatorClass = typename WarpMmaOperator::OperatorClass;
  using ArchTag = typename WarpMmaOperator::ArchTag;

  using ThreadblockShape = typename Mma::Shape;
  using WarpShape = typename WarpMmaOperator::Shape;
  using InstructionShape = typename ArchMmaOperator::Shape;

  static int const kStages = Mma::kStages;
  static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm;
  static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport;

  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  static int const kThreadCount = 32 * WarpCount::kCount;

  using TensorRefA = typename Mma::IteratorA::TensorRef;
  using TensorRefB = typename Mma::IteratorB::TensorRef;
  using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>;

  /// Reference to the auxiliary (pre-activation) output tensor
  using TensorRefAux = cutlass::TensorRef<typename EpilogueOutputOp::ElementAuxOutput, LayoutC>;

  /// Check iterator A and B convolution dimension are the same and
  // set device::ImplicitGemmConvolution::kConvDim
  static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim,
    "Convolution on different different dimensions is not supported");
  static int const kConvDim = Mma::IteratorA::kConvDim;

  /// Conv dimension and problem size structure (Conv2d or Conv3d)
  using ConvProblemSize = ConvProblemSize_;

  // Grouped convolution is not supported by this kernel
  static conv::GroupMode const kGroupMode = conv::GroupMode::kNone;

  /// Wgrad C stride idx for implicit gemm algorithm
  // Conv2d row-major matrix C (KxRSC)
  // Conv3d row-major matrix C (KxTRSC)
  static int const kWgradCStrideIdx =
    platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3;

  /// This chooses the appropriate stride element of the C tensor.
  static int const kTensorCStrideIdx =
    (kConvolutionalOperator == conv::Operator::kWgrad ? kWgradCStrideIdx : 0);

  //
  //
  //

  /// Maps convolution tensor references to the epilogue's output-tile iterator parameters
  using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter<
    LayoutC,
    typename Epilogue::OutputTileIterator::Layout,
    TensorRefC,
    ConvOperator,
    ConvProblemSize
    >;

  /// Argument structure
  struct Arguments {

    //
    // Data members
    //

    ConvProblemSize problem_size;
    TensorRefA ref_A;
    TensorRefB ref_B;
    TensorRefC ref_C;
    TensorRefC ref_D;
    TensorRefC ref_Aux;
    typename EpilogueOutputOp::Params output_op;
    SplitKMode split_k_mode;
    // Optional per-row broadcast vector consumed by the epilogue (may be nullptr)
    void * ptr_Vector;
    // Stride (in elements) between rows of the broadcast vector
    typename LayoutC::Stride::Index ldr;

    //
    // Methods
    //

    /// Default ctor
    CUTLASS_HOST_DEVICE
    Arguments() { }

    CUTLASS_HOST_DEVICE
    Arguments(
      ConvProblemSize const & problem_size
    ):
      problem_size(problem_size) { }

    CUTLASS_HOST_DEVICE
    Arguments(
      ConvProblemSize const & problem_size,
      TensorRefA const & ref_A,
      TensorRefB const & ref_B,
      TensorRefC const & ref_C,
      TensorRefC const & ref_D,
      TensorRefAux const & ref_Aux,
      typename EpilogueOutputOp::Params const & output_op,
      SplitKMode const & split_k_mode = SplitKMode::kSerial,
      void * ptr_Vector = nullptr,
      typename LayoutC::Stride::Index ldr = 0
    ):
      problem_size(problem_size),
      ref_A(ref_A),
      ref_B(ref_B),
      ref_C(ref_C),
      ref_D(ref_D),
      ref_Aux(ref_Aux),
      output_op(output_op),
      split_k_mode(split_k_mode),
      ptr_Vector(ptr_Vector),
      ldr(ldr)
    {

    }

  };

  /// Parameters structure - device-side state precomputed from Arguments on the host
  struct Params {
    ConvProblemSize problem_size;
    cutlass::gemm::GemmCoord grid_tiled_shape;
    gemm::GemmCoord implicit_gemm_problem_size;
    int swizzle_log_tile;
    int gemm_k_iterations;
    typename Mma::IteratorA::Params iterator_A;
    typename Mma::IteratorA::Element const *ptr_A;
    typename Mma::IteratorB::Params iterator_B;
    typename Mma::IteratorB::Element const *ptr_B;
    typename Epilogue::OutputTileIterator::Params iterator_C;
    typename Epilogue::OutputTileIterator::Element *ptr_C;
    typename Epilogue::OutputTileIterator::Params iterator_D;
    typename Epilogue::OutputTileIterator::Element *ptr_D;
    typename Epilogue::AuxOutputTileIterator::Params iterator_Aux;
    typename Epilogue::AuxOutputTileIterator::Element *ptr_Aux;
    typename EpilogueOutputOp::Params output_op;
    int *semaphore;
    SplitKMode split_k_mode;
    void * ptr_Vector;
    typename LayoutC::Stride::Index ldr;

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params():
      swizzle_log_tile(0),
      gemm_k_iterations(0),
      ptr_Vector(nullptr),
      ldr(0)
    { }

    ///
    CUTLASS_HOST_DEVICE
    Params(
      Arguments const &args,
      int *semaphore = nullptr
    ):
      problem_size(args.problem_size),
      implicit_gemm_problem_size(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)),
      iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())),
      ptr_A(args.ref_A.data()),
      iterator_B(args.problem_size, args.ref_B.layout()),
      ptr_B(args.ref_B.data()),
      iterator_C(ConvOutputIteratorParameter::layout(args.ref_C)),
      ptr_C(args.ref_C.data()),
      iterator_D(ConvOutputIteratorParameter::layout(args.ref_D)),
      ptr_D(args.ref_D.data()),
      iterator_Aux(ConvOutputIteratorParameter::layout(args.ref_Aux)),
      ptr_Aux(args.ref_Aux.data()),
      output_op(args.output_op),
      semaphore(semaphore),
      split_k_mode(args.split_k_mode),
      ptr_Vector(args.ptr_Vector),
      ldr(args.ldr)
    {
      // Derive the GEMM mainloop trip count and the CTA grid from the conv problem
      gemm_k_iterations = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size);

      ThreadblockSwizzle threadblock_swizzle;

      grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
        implicit_gemm_problem_size,
        {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
        args.problem_size.split_k_slices);

      swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape);
    }
  };

  /// Shared memory storage structure
  // Mainloop and epilogue storage are never live at the same time, so a union halves the footprint.
  union SharedStorage {
    typename Mma::SharedStorage main_loop;
    typename Epilogue::SharedStorage epilogue;
  };

  //
  // Methods
  //

  CUTLASS_HOST_DEVICE
  ImplicitGemmConvolutionWithAbsMax() { }

  /// Executes one ImplicitGEMM
  CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {

    // Compute threadblock location
    ThreadblockSwizzle threadblock_swizzle;

    cutlass::gemm::GemmCoord threadblock_tile_idx =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    // Early exit if CTA is out of range
    if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() ||
      params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) {

      return;
    }

    // Compute position within threadblock
    int thread_idx = threadIdx.x;

    // Construct iterators to A and B operands
    typename Mma::IteratorA iterator_A(
      params.iterator_A,
      params.problem_size,
      params.ptr_A,
      thread_idx,
      MatrixCoord(
        threadblock_tile_idx.m() * Mma::Shape::kM,
        threadblock_tile_idx.k() * Mma::Shape::kK
      )
    );

    typename Mma::IteratorB iterator_B(
      params.iterator_B,
      params.problem_size,
      params.ptr_B,
      thread_idx,
      MatrixCoord(
        threadblock_tile_idx.k() * Mma::Shape::kK,
        threadblock_tile_idx.n() * Mma::Shape::kN
      )
    );

    // Broadcast the warp_id computed by lane 0 to ensure dependent code
    // is compiled as warp-uniform.
    int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
    int lane_idx = threadIdx.x % 32;

    //
    // Main loop
    //

    // Construct thread-scoped matrix multiply
    Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);

    typename Mma::FragmentC accumulators;

    accumulators.clear();

    // Compute threadblock-scoped matrix multiply-add
    // (accumulators serves as both source and destination fragment)
    mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);

    //
    // Epilogue
    //

    EpilogueOutputOp output_op(params.output_op);

    // Construct the semaphore.
    int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m();

    Semaphore semaphore(params.semaphore + block_idx, thread_idx);

    // Compute logical position within grid
    threadblock_tile_idx =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    // If performing a reduction via split-K, fetch the initial synchronization
    if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {

      // Fetch the synchronization lock initially but do not block.
      semaphore.fetch();

      // Indicate which position in a serial reduction the output operator is currently updating
      output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k());
    }

    MatrixCoord threadblock_offset(
      threadblock_tile_idx.m() * Mma::Shape::kM,
      threadblock_tile_idx.n() * Mma::Shape::kN
    );

    // Tile iterator writing to destination tensor
    typename Epilogue::OutputTileIterator iterator_D(
      params.iterator_D,
      params.ptr_D,
      ConvOutputIteratorParameter::extent(params.problem_size),
      thread_idx,
      threadblock_offset
    );

    // Tile iterator writing to auxiliary tensor.
    typename Epilogue::AuxOutputTileIterator iterator_Aux(
      params.iterator_Aux,
      params.ptr_Aux,
      ConvOutputIteratorParameter::extent(params.problem_size),
      thread_idx,
      threadblock_offset
    );

    // Tile iterator reading from source accumulator tensor
    typename Epilogue::OutputTileIterator iterator_C(
      params.iterator_C,
      params.ptr_C,
      ConvOutputIteratorParameter::extent(params.problem_size),
      thread_idx,
      threadblock_offset
    );

    // Define the reduction output pointer and move to the appropriate place
    typename Epilogue::ElementVector *ptr_Vector =
        static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);

    // Construct the epilogue
    Epilogue epilogue(
      shared_storage.epilogue,
      thread_idx,
      warp_idx,
      lane_idx);

    // Move to appropriate location for this output tile
    if (ptr_Vector) {
      ptr_Vector += threadblock_offset.column() + threadblock_tile_idx.m() * params.ldr;
    }

    // Wait on the semaphore - this latency may have been covered by iterator construction
    if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {

      // For subsequent threadblocks, the source matrix is held in the 'D' tensor.
      if (threadblock_tile_idx.k()) {
        iterator_C = iterator_D;
      }

      semaphore.wait(threadblock_tile_idx.k());

    }
    // Each split-k-slice writes to a unique tensor location
    else if (params.split_k_mode == SplitKMode::kParallel) {
      iterator_D.add_pointer_offset(threadblock_tile_idx.k() *
        cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size));
    }

    // Execute the epilogue operator to update the destination tensor.
    epilogue(output_op,
             // Only the final block uses Vector
             ((params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) &&
              (params.grid_tiled_shape.k() != threadblock_tile_idx.k() + 1))
                 ? nullptr
                 : ptr_Vector,
             iterator_D,
             accumulators,
             iterator_C,
             iterator_Aux,
             ConvOutputIteratorParameter::extent(params.problem_size),
             threadblock_offset);

    //
    // Release the semaphore
    //

    if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {

      int lock = 0;
      if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) {

        // The final threadblock resets the semaphore for subsequent grids.
        lock = 0;
      }
      else {
        // Otherwise, the semaphore is incremented
        lock = threadblock_tile_idx.k() + 1;
      }

      semaphore.release(lock);
    }
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_with_absmax.h/0 | {
"file_path": "cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_with_absmax.h",
"repo_id": "cutlass",
"token_count": 6188
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Extracts the host-params objects into non-template code.
*/
#pragma once
#define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
#include <fstream>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Params structure used for all Conv2d analytic tile iterators
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dAnalyticParams {

  using Layout = Layout_;

  /// Layout of the operand tensor
  Layout layout;

  //
  // Methods
  //

  /// Default-constructs an empty params object
  CUTLASS_HOST_DEVICE
  Conv2dAnalyticParams() { }

  /// Constructs from a problem size (ignored) and the operand tensor layout
  CUTLASS_HOST_DEVICE
  Conv2dAnalyticParams(
    Conv2dProblemSize const &,   // unused; present so every Params type shares one interface
    Layout const &layout_
  ): layout(layout_) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Params structure used for the Conv2d few-channels tile iterators
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFewChannelsParams {

  using Layout = Layout_;

  // Tensor strides narrowed from the layout to 32-bit elements
  int32_t stride_w;
  int32_t stride_h;
  int32_t stride_n;

  // Fast integer division/modulo helpers sized from the problem dimensions
  FastDivmod divmod_P;
  FastDivmod divmod_Q;
  FastDivmod divmod_S;
  FastDivmod divmod_C;

  //
  // Methods
  //

  CUTLASS_HOST_DEVICE
  Conv2dFewChannelsParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dFewChannelsParams(
    Conv2dProblemSize const &problem_size, // supplies P, Q, S, C for the divmod helpers
    Layout const &layout
  ):
    stride_w(int32_t(layout.stride()[0])),
    stride_h(int32_t(layout.stride()[1])),
    stride_n(int32_t(layout.stride()[2])),
    divmod_P(problem_size.P),
    divmod_Q(problem_size.Q),
    divmod_S(problem_size.S),
    divmod_C(problem_size.C)
  {

  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams
struct Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams {

  using Layout = layout::TensorNHWC;

  /// Layout of the output-gradient tensor
  Layout layout;

  /// Number of tile rows covered per filter position (tile count times CTA rows)
  int tiled_rows_per_filter;

  //
  // Methods
  //

  /// Default-constructs an empty params object
  CUTLASS_HOST_DEVICE
  Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,             ///< layout object
    int element_size_bits,            ///< size of each element in bits
    MatrixCoord threadblock_shape
  ): layout(layout) {

    tiled_rows_per_filter =
        strided_dgrad_tile_m_per_filter(problem_size, threadblock_shape.row()) * threadblock_shape.row();
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED

/// Appends one row describing a conv tile iterator's configuration to a CSV trace file,
/// writing a header row first if the file does not yet exist. Host-only: compiled to a
/// no-op for device passes via the __CUDA_ARCH__ guard.
CUTLASS_HOST_DEVICE
void TraceIteratorParams(
  char const *conv_operator,                     ///< operator name, e.g. "conv2d_fprop"
  char const *operand,                           ///< operand name, e.g. "activation"
  int element_size_bits,                         ///< size of each element in bits
  MatrixCoord threadblock_shape,
  int thread_count,
  int access_size,
  layout::PitchLinearCoord threadmap_iterations,
  layout::PitchLinearCoord threadmap_delta
) {

#if !defined(__CUDA_ARCH__)

  // Single definition of the trace file name - the probe and the writer below
  // must refer to the same file.
  char const *fname = "conv_iterator_params.csv";

  // Probe for an existing file so the header is only emitted once.
  std::ifstream test(fname);
  bool file_exists = test.is_open();

  if (file_exists) {
    test.close();
  }

  std::ofstream trace(fname, std::ofstream::app);

  if (!file_exists) {
    trace
      << "Operator,Operand,ElementSize,CtaRows,CtaColumns,ThreadCount,AccessSize,"
      << "IterationsContiguous,IterationsStrided,DeltaContiguous,DeltaStrided\n";
  }

  trace << conv_operator << "," << operand << "," << element_size_bits << ","
    << threadblock_shape.row() << "," << threadblock_shape.column()
    << "," << thread_count << "," << access_size
    << "," << threadmap_iterations.contiguous() << "," << threadmap_iterations.strided()
    << "," << threadmap_delta.contiguous() << "," << threadmap_delta.strided() << "\n";
#endif
}

#define TRACE_CONV_INITIALIZERS(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta) \
  TraceIteratorParams(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta);

#else

#define TRACE_CONV_INITIALIZERS(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta) {}

#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFpropActivationIteratorOptimizedParams;

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
/// (NHWC specialization). Precomputes the byte increments needed to traverse
/// the activation tensor across filter positions (S, R) and channel tiles.
template<>
struct Conv2dFpropActivationIteratorOptimizedParams<layout::TensorNHWC> {

  using Layout = layout::TensorNHWC;

  Layout layout;

  int64_t inc_next[3];    // {next S, next R, next C} - pointer increments in bytes
  int filter_c_delta;     // number of logical elements to add to filter_c_
  int PQ;                 // product of P*Q
  FastDivmod pq_divmod;
  FastDivmod q_divmod;

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dFpropActivationIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dFpropActivationIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,                             ///< layout object
    int element_size_bits,                            ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    layout(layout),
    PQ(problem_size.P * problem_size.Q),
    pq_divmod(PQ),
    q_divmod(problem_size.Q) {

    TRACE_CONV_INITIALIZERS("conv2d_fprop", "activation",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);

    // Convolution traverses the filter in the opposite direction of cross-correlation
    int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1);

    // next S
    inc_next[0] = conv_sign * (
      int64_t(layout.stride()[0]) * problem_size.dilation_w
    ) * element_size_bits / 8;

    // next R - advance one dilated row and rewind the S traversal
    inc_next[1] = conv_sign * (
        int64_t(layout.stride()[1]) * problem_size.dilation_h
        - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
      ) * element_size_bits / 8;

    // next C - advance one channel tile and rewind the full (R, S) traversal
    inc_next[2] = (
        threadblock_shape.column() * problem_size.split_k_slices
        - conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
        - conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
      ) * element_size_bits / 8;

    // logical offset added to internal channel counter - units are elements, not bytes
    filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices;
  }

#if ENABLE_CONV2D_PARAMS_PRINT
  /// Prints internal state.
  CUTLASS_HOST_DEVICE
  void print() {
    auto stride = layout.stride();
    printf(
      "Conv2dFpropActivationIteratorOptimizedParams:\n"
      " layout(w: %d, h: %d, n: %d)\n"
      " inc_next[%ld, %ld, %ld]\n"
      " filter_c_delta(%d) - PQ(%d)\n"
      " pq_divmod(divisor: %d, multiplier: %u, shift_right: %u)\n"
      " q_divmod(divisor: %d, multiplier: %u, shift_right: %u)\n",
      stride[0], stride[1], stride[2],
      inc_next[0], inc_next[1], inc_next[2],
      filter_c_delta,
      PQ,
      pq_divmod.divisor,
      pq_divmod.multiplier,
      pq_divmod.shift_right,
      q_divmod.divisor,
      q_divmod.multiplier,
      q_divmod.shift_right
    );
  }
#endif
};
/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
/// (interleaved NCxHWx specialization). The innermost dimension packs
/// kInterleaved channels, so W-direction steps move by kInterleaved elements.
template <int Interleaved_>
struct Conv2dFpropActivationIteratorOptimizedParams<layout::TensorNCxHWx<Interleaved_>> {
  static int const kInterleaved = Interleaved_;
  using Layout = layout::TensorNCxHWx<kInterleaved>;

  Layout layout;

  int64_t inc_next[3];    // {next S, next R, next C} - pointer increments in bytes
  int filter_c_delta;     // number of logical elements to add to filter_c_
  int PQ;                 // product of P*Q
  FastDivmod pq_divmod;
  FastDivmod q_divmod;

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dFpropActivationIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dFpropActivationIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,                             ///< layout object
    int element_size_bits,                            ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    layout(layout), PQ(problem_size.P * problem_size.Q), pq_divmod(PQ), q_divmod(problem_size.Q) {

    TRACE_CONV_INITIALIZERS("conv2d_fprop", "activation",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);

    // Convolution traverses the filter in the opposite direction of cross-correlation
    int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1);

    // next S - one dilated step of kInterleaved packed channels
    inc_next[0] = conv_sign * (kInterleaved * problem_size.dilation_w) * element_size_bits / 8;

    // next R - advance one dilated row and rewind the S traversal
    inc_next[1] = conv_sign * (
        int64_t(layout.stride()[0]) * problem_size.dilation_h
        - (problem_size.S - 1) * kInterleaved * problem_size.dilation_w
      ) * element_size_bits / 8;

    // next C - advance one channel tile and rewind the full (R, S) traversal
    inc_next[2] = (
        threadblock_shape.column() * problem_size.split_k_slices / kInterleaved * int64_t(layout.stride()[1])
        - conv_sign * int64_t(problem_size.R - 1) * layout.stride()[0] * problem_size.dilation_h
        - conv_sign * int64_t(problem_size.S - 1) * kInterleaved * problem_size.dilation_w
      ) * element_size_bits / 8;

    // logical offset added to internal channel counter - units are elements, not bytes
    filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFpropFilterIteratorOptimizedParams;

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Parameters structure used for the optimized Conv2d fprop filter tile iterator
/// (NHWC specialization). Precomputes byte increments for traversing the filter
/// tensor across K, (R, S), and channel-tile positions.
template<>
struct Conv2dFpropFilterIteratorOptimizedParams<layout::TensorNHWC>
{

  using Layout = layout::TensorNHWC;

  Layout layout;
  int RS;                    // filter extent R * S
  int filter_c_delta;        // logical channel offset per tile - units are elements

  int64_t inc_next_k;         // offset in units of bytes to next K position
  int64_t inc_next_rs;        // offset in units of bytes to next RS position
  int64_t inc_next_c;         // offset in units of bytes to next C position

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dFpropFilterIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dFpropFilterIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,
    int element_size_bits,                        ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    layout(layout) {

    TRACE_CONV_INITIALIZERS("conv2d_fprop", "filter",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);

    RS = problem_size.R * problem_size.S;

    // Step to the next K coordinate covered by this thread's strided iterations
    inc_next_k = (int64_t(layout.stride()[2]) * threadmap_delta.strided() * element_size_bits) / 8;

    // Step to the next (R, S) position, rewinding the K iterations first
    inc_next_rs =
      ( int64_t(layout.stride()[0])
        - int64_t(layout.stride()[2]) * (threadmap_iterations.strided() - 1) * threadmap_delta.strided()
      ) * element_size_bits / 8;

    // Step to the next channel tile, rewinding the full (R, S) and K traversals
    inc_next_c =
      (
        threadblock_shape.row() * problem_size.split_k_slices
        - int64_t(RS - 1) * layout.stride()[0]
        - int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
      ) * element_size_bits / 8;

    filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices;
  }

#if ENABLE_CONV2D_PARAMS_PRINT
  /// Prints internal state.
  CUTLASS_HOST_DEVICE
  void print() {
    auto stride = layout.stride();
    printf(
      "Conv2dFpropFilterIteratorOptimizedParams:\n"
      " layout[%d, %d, %d]\n"
      " RS(%d), filter_c_delta(%d), inc_next(k: %ld, rs: %ld, c: %ld)\n",
      stride[0], stride[1], stride[2],
      RS,
      filter_c_delta,
      inc_next_k, inc_next_rs, inc_next_c
    );
  }
#endif
};
/// Parameters structure used for the optimized Conv2d fprop filter tile iterator
/// (interleaved CxRSKx specialization). K advances in units of kInterleaved
/// packed elements.
template<int Interleaved_>
struct Conv2dFpropFilterIteratorOptimizedParams<layout::TensorCxRSKx<Interleaved_>>
{
  static int const kInterleaved = Interleaved_;
  using Layout = layout::TensorCxRSKx<kInterleaved>;

  Layout layout;
  int RS;                    // filter extent R * S
  int filter_c_delta;        // logical channel offset per tile - units are elements

  int64_t inc_next_k;         // offset in units of bytes to next K position
  int64_t inc_next_rs;        // offset in units of bytes to next RS position
  int64_t inc_next_c;         // offset in units of bytes to next C position

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dFpropFilterIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dFpropFilterIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,
    int element_size_bits,                        ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    layout(layout) {

    TRACE_CONV_INITIALIZERS("conv2d_fprop", "filter",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);

    RS = problem_size.R * problem_size.S;

    // Step to the next K coordinate covered by this thread's strided iterations
    inc_next_k = (kInterleaved * threadmap_delta.strided() * element_size_bits) / 8;

    // Step to the next (R, S) position, rewinding the K iterations first
    inc_next_rs =
      ( int64_t(layout.stride()[0])
        - kInterleaved * (threadmap_iterations.strided() - 1) * threadmap_delta.strided()
      ) * element_size_bits / 8;

    // Step to the next channel tile, rewinding the full (R, S) and K traversals
    inc_next_c =
      (
        threadblock_shape.row() * problem_size.split_k_slices / kInterleaved * int64_t(layout.stride()[2])
        - int64_t(RS - 1) * layout.stride()[0]
        - int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * kInterleaved
      ) * element_size_bits / 8;

    filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Dgrad Optimized Dy params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv2d DGRAD OutputGradient (dy) iterator
struct Conv2dDgradOutputGradientIteratorOptimizedParams {

  using Layout = layout::TensorNHWC;

  Layout layout;

  int64_t inc_next[3];    // {next S, next R, next K} - pointer increments in bytes
  int filter_k_delta;     // number of logical elements to add to filter_k_

  int HW;                 // product of H*W
  FastDivmod hw_divmod;
  FastDivmod w_divmod;

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dDgradOutputGradientIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dDgradOutputGradientIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,
    int element_size_bits,                        ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    layout(layout),
    HW(problem_size.H *problem_size.W),
    hw_divmod(HW),
    w_divmod(problem_size.W) {

    TRACE_CONV_INITIALIZERS("conv2d_dgrad", "output_gradient",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);

    // Note: the sign is flipped relative to fprop - dgrad walks the filter in reverse
    int conv_sign = (problem_size.mode == Mode::kConvolution ? 1 : -1);

    // next S
    inc_next[0] = conv_sign * (
      (int64_t)layout.stride()[0] * problem_size.dilation_w
    ) * element_size_bits / 8;

    // next R - advance one dilated row and rewind the S traversal
    inc_next[1] = conv_sign * (
        (int64_t)layout.stride()[1] * problem_size.dilation_h
        - (problem_size.S - 1) * (int64_t)layout.stride()[0] * problem_size.dilation_w
      ) * element_size_bits / 8;

    // next K - advance one K tile and rewind the full (R, S) traversal
    inc_next[2] = (
        threadblock_shape.column() * problem_size.split_k_slices
        - conv_sign * (problem_size.R - 1) * (int64_t)layout.stride()[1] * problem_size.dilation_h
        - conv_sign * (problem_size.S - 1) * (int64_t)layout.stride()[0] * problem_size.dilation_w
      ) * element_size_bits / 8;

    // logical offset added to internal channel counter - units are elements, not bytes
    filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Strided Dgrad Optimized Dy params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for the strided Conv2d DGRAD OutputGradient (dy) iterator
struct Conv2dStridedDgradOutputGradientIteratorOptimizedParams {

  using Layout = layout::TensorNHWC;

  Layout layout;

  int64_t inc_next[3];    // {next S, next R, next K} - pointer increments in bytes
  int filter_k_delta;     // number of logical elements to add to filter_k_

  int tiled_rows_per_filter;

  int conv_sign;          // +1 for convolution mode, -1 for cross-correlation

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dStridedDgradOutputGradientIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dStridedDgradOutputGradientIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,                             ///< layout object
    int element_size_bits,                            ///< size of each element in bits
    MatrixCoord threadblock_shape
  ): layout(layout) {

    int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_shape.row());

    tiled_rows_per_filter = tile_m_per_filter * threadblock_shape.row();

    conv_sign = (problem_size.mode == Mode::kConvolution ? 1 : -1);

    // next S
    inc_next[0] = conv_sign * (
      (int64_t)layout.stride()[0] * problem_size.dilation_w
    ) * element_size_bits / 8;

    // next R (no S rewind here - the strided iterator handles S separately)
    inc_next[1] = conv_sign * (
        (int64_t)layout.stride()[1] * problem_size.dilation_h
      ) * element_size_bits / 8;

    // next K
    inc_next[2] = (
        threadblock_shape.column() * problem_size.split_k_slices
      ) * element_size_bits / 8;

    // logical offset added to internal channel counter - units are elements, not bytes
    filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
// Dgrad Optimized w params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Precomputed state for the dgrad filter (w) tile iterator over a
/// layout::TensorNHWC filter tensor. Pointer increments are byte offsets
/// computed once so the device-side iterator only adds them.
struct Conv2dDgradFilterIteratorOptimizedParams {

  using Layout = layout::TensorNHWC;

  Layout layout;
  int RS;                     // total filter positions, R * S
  int filter_k_delta;         // logical elements added to the K index per tile (elements, not bytes)
  int64_t inc_next_strided;   // offset in units of bytes to next K coordinate within tile
  int64_t inc_next_rs;        // offset in units of bytes to next RS position
  int64_t inc_next_k;         // offset in units of bytes to next K position in subsequent tile

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dDgradFilterIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dDgradFilterIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,
    int element_size_bits,                        ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    layout(layout), RS(problem_size.R * problem_size.S) {

    TRACE_CONV_INITIALIZERS("conv2d_dgrad", "filter",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);

    // Step between successive strided accesses within one tile (bytes).
    // (int64_t) cast widens before multiplying to avoid 32-bit overflow.
    inc_next_strided = ((int64_t)layout.stride()[2] * threadmap_delta.strided() * element_size_bits) / 8;

    // Advance to the next (r, s) filter position, subtracting back the
    // intra-tile strided offset accumulated above (bytes).
    inc_next_rs =
      ( (int64_t)layout.stride()[0]
        - (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
      ) * element_size_bits / 8;

    // Advance to this split-k slice's next K tile, subtracting back both the
    // full RS traversal and the intra-tile strided offset (bytes).
    inc_next_k =
      (
        threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[2]
        - (problem_size.R * problem_size.S - 1) * (int64_t)layout.stride()[0]
        - (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
      ) * element_size_bits / 8;

    filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
// StridedDgrad Optimized w params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Precomputed state for the strided-dgrad filter (w) tile iterator over a
/// layout::TensorNHWC filter tensor. Pointer increments are byte offsets
/// computed once so the device-side iterator only adds them.
///
/// Unlike Conv2dDgradFilterIteratorOptimizedParams above, the S/R/K increments
/// here do NOT fold in the intra-tile strided rewind; that rewind is applied
/// separately via reset_bytes.
struct Conv2dStridedDgradFilterIteratorOptimizedParams {

  using Layout = layout::TensorNHWC;

  Layout layout;
  int RS;                     // total filter positions, R * S
  int filter_k_delta;         // logical elements added to the K index per tile (elements, not bytes)
  int64_t inc_next_strided;   // offset in units of bytes to next K coordinate within tile
  int64_t inc_next[3];        // {next S, next R, next K} pointer increments, in bytes
  int64_t reset_bytes;        // offset in units of bytes to move back the pointer

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dStridedDgradFilterIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dStridedDgradFilterIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,
    int element_size_bits,                        ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    layout(layout), RS(problem_size.R * problem_size.S) {

    TRACE_CONV_INITIALIZERS("conv2d_dgrad", "filter",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);

    // Step between successive strided accesses within one tile (bytes).
    // Bug fix: widen to int64_t BEFORE multiplying, matching
    // Conv2dDgradFilterIteratorOptimizedParams; the previous all-int product
    // could overflow 32 bits for large strides before being stored in int64_t.
    inc_next_strided = ((int64_t)layout.stride()[2] * threadmap_delta.strided() * element_size_bits) / 8;

    // next S: one filter-stride step along W (bytes)
    inc_next[0] =
      ( (int64_t)layout.stride()[0] * problem_size.stride_w
      ) * element_size_bits / 8;

    // next R: one filter-stride step along H (bytes)
    inc_next[1] =
      ( (int64_t)layout.stride()[1] * problem_size.stride_h
      ) * element_size_bits / 8;

    // next K: advance to this split-k slice's next K tile (bytes)
    inc_next[2] =
      (
        threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[2]
      ) * element_size_bits / 8;

    // offset in units of bytes to move the pointer in backward direction,
    // undoing the intra-tile strided offset accumulated via inc_next_strided
    reset_bytes = (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
      * element_size_bits / 8;

    filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv2d WGRAD Output Gradient (dy) iterator
/// Parameters object for Conv2d WGRAD Output Gradient (dy) iterator
struct Conv2dWgradOutputGradientIteratorOptimizedParams {

  using Layout = layout::TensorNHWC;

  Layout layout;
  int NPQ;                        // precomputed product of N*P*Q for clearing predicates
  FastDivmod pq_divmod;           // fast divide/modulo by P*Q
  FastDivmod q_divmod;            // fast divide/modulo by Q
  int64_t offset_next_strided;    // offset in units of bytes to next npq coordinate within tile
  int64_t offset_next_contiguous; // offset in units of bytes to next k coordinate within tile
  int64_t inc_next_npq;           // offset in units of bytes to next npq position in subsequent tile

  //
  // Methods
  //

  CUTLASS_HOST_DEVICE
  Conv2dWgradOutputGradientIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dWgradOutputGradientIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,
    int element_size_bits,                        ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    layout(layout),
    NPQ(problem_size.N * problem_size.P * problem_size.Q),
    pq_divmod(problem_size.P * problem_size.Q),
    q_divmod(problem_size.Q) {

    TRACE_CONV_INITIALIZERS("conv2d_wgrad", "output_gradient",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);

    // Incremental offsets in units of bytes: (number of elements) * element_size_bits / 8.
    // The multiply precedes the divide-by-8 so results stay exact for sub-byte types.
    offset_next_strided = (threadmap_delta.strided() * (int64_t)layout.stride()[0])
      * element_size_bits / 8;

    offset_next_contiguous = (threadmap_delta.contiguous())
      * element_size_bits / 8;

    // Advance to this split-k slice's next NPQ tile (bytes)
    inc_next_npq = (threadblock_shape.column() * problem_size.split_k_slices * (int64_t)layout.stride()[0])
      * element_size_bits / 8;
  }
};
/// Parameters object for the Conv2d WGRAD activation (x) iterator. Stores
/// FastDivmod helpers so the device side can decompose linear offsets into
/// filter/output coordinates without hardware integer division.
struct Conv2dWgradActivationIteratorOptimizedParams {

  using Layout = layout::TensorNHWC;

  Layout layout;

  FastDivmod sc_divmod;             // divide/modulo by S*C
  FastDivmod pq_divmod;             // divide/modulo by P*Q
  FastDivmod q_divmod;              // divide/modulo by Q
  FastDivmod c_divmod;              // divide/modulo by C
  FastDivmod s_divmod;              // divide/modulo by S*dilation_w
  int small_channel_conv_s_offset;  // (S-1)*dilation_w - pad_w; presumably consumed by the
                                    // small-channel specialization — verify against iterator usage

  //
  // Methods
  //
  CUTLASS_HOST_DEVICE
  Conv2dWgradActivationIteratorOptimizedParams() { }

  CUTLASS_HOST_DEVICE
  Conv2dWgradActivationIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout
  ):
    layout(layout),
    sc_divmod(problem_size.S * problem_size.C),
    pq_divmod(problem_size.P * problem_size.Q),
    q_divmod(problem_size.Q),
    c_divmod(problem_size.C),
    s_divmod(problem_size.S * problem_size.dilation_w),
    small_channel_conv_s_offset((problem_size.S - 1) * problem_size.dilation_w - problem_size.pad_w) {

  }

  /// Overload matching the common iterator-params signature; the extra
  /// threadmap arguments are only consumed by the tracing macro.
  CUTLASS_HOST_DEVICE
  Conv2dWgradActivationIteratorOptimizedParams(
    Conv2dProblemSize const &problem_size,
    Layout const &layout,
    int element_size_bits,                        ///< size of each element in bits
    MatrixCoord threadblock_shape,
    int thread_count,
    int access_size,
    layout::PitchLinearCoord threadmap_iterations,
    layout::PitchLinearCoord threadmap_delta
  ):
    Conv2dWgradActivationIteratorOptimizedParams(
      problem_size,
      layout
    ) {

    TRACE_CONV_INITIALIZERS("conv2d_wgrad", "activation",
      element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
  }
};
/// Empty params object for PredicatedScaleBiasVectorAccessIterator.
/// The iterator needs no precomputed state, so every constructor is a no-op;
/// unused parameter names are commented out to document intent.
struct PredicatedScaleBiasVectorAccessIteratorParams {
 public:

  /// Default ctor
  CUTLASS_HOST_DEVICE
  PredicatedScaleBiasVectorAccessIteratorParams() { }

  /// No-op ctor for pitch-linear layouts (nothing to precompute)
  CUTLASS_HOST_DEVICE
  PredicatedScaleBiasVectorAccessIteratorParams(
    Conv2dProblemSize const & /* problem_size */,
    layout::PitchLinear const & /* layout */) {}

  /// No-op ctor for row-major layouts (nothing to precompute)
  CUTLASS_HOST_DEVICE
  PredicatedScaleBiasVectorAccessIteratorParams(
    Conv2dProblemSize const & /* problem_size */,
    layout::RowMajor const & /* layout */) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/conv/threadblock/conv2d_params.h/0 | {
"file_path": "cutlass/include/cutlass/conv/threadblock/conv2d_params.h",
"repo_id": "cutlass",
"token_count": 11339
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of scale and bias vectors.
This iterator uses masks to guard out-of-bounds accesses.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorIterator
///
template <typename WarpShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorIterator for wgrad pitch-linear data.
///
template <typename WarpShape_, typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
                                        Element_,
                                        layout::PitchLinear> {
 public:

  using WarpShape = WarpShape_;
  using Element = Element_;
  using Layout = layout::PitchLinear;

  using Index = typename Layout::Index;
  using LongIndex = typename Layout::LongIndex;

  using TensorRef = TensorRef<Element, Layout>;
  using TensorView = TensorView<Element, Layout>;
  using TensorCoord = typename Layout::TensorCoord;

  using ConstPointer = const Element *;
  using NonConstPointer = typename platform::remove_const<Element>::type *;

  // Each access loads a single element
  static int const kElementsPerAccess = 1;
  using AccessType = AlignedArray<Element, kElementsPerAccess>;

  // One iteration per 8 contiguous elements of the warp tile
  static int const kIterations = WarpShape::kContiguous / 8;

  /// Fragment object to be loaded or stored. Slots alternate scale/bias as
  /// __half2 pairs: [scale0, bias0, scale1, bias1, ...], each value duplicated
  /// in .x and .y (see load_with_pointer_offset).
  using Fragment = cutlass::Array<__half2, 2 * kIterations * kElementsPerAccess>;

  /// Parameters object is precomputed state and is host-constructible
  using Params = Conv2dWgradActivationIteratorOptimizedParams;

 private:

  //
  // Data members
  //

  /// Parameters object with precomputed internal state
  Params const &params_;

  /// Internal pointer to first access of tile
  ConstPointer scale_pointer_;
  ConstPointer bias_pointer_;

  /// Size of tensor
  Conv2dProblemSize problem_size_;

  // Logical element offset of this thread along the contiguous dimension
  int32_t thread_offset_;

  // Channel dimension in contiguous dimension stays constant for each gemm_iteration_k
  int32_t filter_c_[kIterations];

 public:

  /// Constructs a TileIterator from its precomputed state, threadblock offset,
  /// and thread ID
  CUTLASS_HOST_DEVICE
  PredicatedScaleBiasVectorIterator(
      /// Precomputed parameters object
      Params const &params,
      /// Extent of tensor
      Conv2dProblemSize const &problem_size,
      /// Pointer to the start of the scale vector
      ConstPointer scale_pointer,
      /// Pointer to the start of the bias vector
      ConstPointer bias_pointer,
      /// ID of each participating thread
      int thread_id,
      /// Initial offset of threadblock
      TensorCoord const &threadblock_offset)
      : params_(params),
        problem_size_(problem_size),
        scale_pointer_(scale_pointer),
        bias_pointer_(bias_pointer) {
    // (thread_id % 32) / 4 == lane_id / 4: groups of four consecutive lanes
    // share the same contiguous offset
    thread_offset_ = threadblock_offset.contiguous() + (thread_id % 32) / 4;
  }

  /// Construct a PredicatedTileIterator with zero threadblock offset
  CUTLASS_HOST_DEVICE
  PredicatedScaleBiasVectorIterator(
      /// Precomputed parameters object
      Params const &params,
      /// Extent of tensor
      Conv2dProblemSize const &problem_size,
      /// Pointer to the start of the scale vector
      ConstPointer scale_pointer,
      /// Pointer to the start of the bias vector
      ConstPointer bias_pointer,
      ///< ID of each participating thread
      int thread_id)
      : PredicatedScaleBiasVectorIterator(params, problem_size,
                                          scale_pointer, bias_pointer,
                                          thread_id, make_Coord(0, 0)) {}

  /// Advances an iterator along logical dimensions of matrix in units of whole warp tiles,
  /// then recomputes the per-iteration channel index from the new offset.
  CUTLASS_DEVICE
  void add_tile_offset(
      TensorCoord const &tile_offset) {
    thread_offset_ += (WarpShape::kContiguous * tile_offset.contiguous());

    CUTLASS_PRAGMA_UNROLL
    for(int c = 0; c < kIterations; ++c) {
      int rsc_offset = thread_offset_ + c * 8;

      int residual, tmp;
      // Decompose rsc_offset: strip the leading component via S*C, then split
      // the remainder with C so filter_c_[c] receives the channel coordinate.
      params_.sc_divmod(tmp, residual, rsc_offset);
      params_.c_divmod(tmp, filter_c_[c], residual);
    }
  }

  /// Loads a fragment from memory.
  /// NOTE(review): pointer_offset is not applied anywhere in this function;
  /// loads are addressed solely by filter_c_ — confirm this is intentional.
  CUTLASS_DEVICE
  void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {

    // Initialize all slots to zero before loading
    frag.fill(__float2half2_rn(0.0f));
    __half2 *frag_ptr = reinterpret_cast<__half2 *>(&frag);

    // load scale into the .x half of even fragment slots
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < kIterations; ++c) {
      cutlass::arch::global_load<
          __half,
          sizeof(AccessType)
      >(
          frag_ptr[c * 2].x,
          scale_pointer_ + filter_c_[c],
          true
      );
    }

    // load bias into the .x half of odd fragment slots
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < kIterations; ++c) {
      cutlass::arch::global_load<
          __half,
          sizeof(AccessType)
      >(
          frag_ptr[c * 2 + 1].x,
          bias_pointer_ + filter_c_[c],
          true
      );
    }

    // duplicate scale into .y so each __half2 holds the value twice
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < kIterations; ++c) {
      frag_ptr[c * 2].y = frag_ptr[c * 2].x;
    }

    // duplicate bias into .y likewise
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < kIterations; ++c) {
      frag_ptr[c * 2 + 1].y = frag_ptr[c * 2 + 1].x;
    }
  }

  /// Loads a fragment from memory
  CUTLASS_DEVICE
  void load(Fragment &frag) {
    load_with_pointer_offset(frag, 0);
  }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept |
///            MaskedTileIteratorConcept
///
template <typename WarpShape_,
          typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
                                        Element_,
                                        layout::RowMajor> {
 public:

  using WarpShape = WarpShape_;
  using Element = Element_;
  using Layout = layout::RowMajor;

  using Index = typename Layout::Index;
  using LongIndex = typename Layout::LongIndex;

  using TensorRef = TensorRef<Element, Layout>;
  using TensorView = TensorView<Element, Layout>;
  using TensorCoord = typename Layout::TensorCoord;

  using ConstPointer = const Element *;
  using NonConstPointer = typename platform::remove_const<Element>::type *;

  // Adapter: map row-major (row, column) onto the pitch-linear specialization
  // by swapping coordinates (contiguous <- column, strided <- row).
  using UnderlyingIterator = PredicatedScaleBiasVectorIterator<
      layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
      Element,
      layout::PitchLinear>;

  using AccessType = typename UnderlyingIterator::AccessType;
  static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
  using Fragment = typename UnderlyingIterator::Fragment;

  /// Parameters object is precomputed state and is host-constructible
  class Params {
   private:
    friend PredicatedScaleBiasVectorIterator;

    /// Parameters object
    typename UnderlyingIterator::Params params_;

   public:
    /// Default ctor
    CUTLASS_HOST_DEVICE
    Params() { }

    /// Construct the Params object given a pitch-linear tensor's layout.
    /// NOTE(review): the supplied layout is ignored; the underlying params are
    /// built from a zero-stride TensorNHWC — presumably only the FastDivmod
    /// members derived from problem_size are used. Verify.
    CUTLASS_HOST_DEVICE
    Params(Conv2dProblemSize const &problem_size, Layout const &layout)
        : params_(problem_size, layout::TensorNHWC(0, 0, 0)){};
  };

 private:
  //
  // Data members
  //

  /// Underlying pitch-linear tile iterator
  UnderlyingIterator iterator_;

 public:

  /// Constructs a TileIterator from its precomputed state, threadblock offset,
  /// and thread ID
  CUTLASS_HOST_DEVICE
  PredicatedScaleBiasVectorIterator(
      ///< Precomputed parameters object
      Params const &params,
      ///< Extent of tensor
      Conv2dProblemSize const &problem_size,
      ///< Pointer to the start of the scale vector
      ConstPointer scale_pointer,
      ///< Pointer to the start of the bias vector
      ConstPointer bias_pointer,
      ///< ID of each participating thread
      int thread_id,
      ///< Initial offset of threadblock
      TensorCoord const &threadblock_offset)
      : iterator_(params.params_, problem_size, scale_pointer, bias_pointer,
                  thread_id,
                  layout::PitchLinearCoord(threadblock_offset.column(),
                                           threadblock_offset.row())) {}

  /// Construct a PredicatedTileIterator with zero threadblock offset
  CUTLASS_HOST_DEVICE
  PredicatedScaleBiasVectorIterator(
      Params const &params,                  ///< Precomputed parameters object
      Conv2dProblemSize const &problem_size, ///< Extent of tensor
      ConstPointer scale_pointer,            ///< Pointer to the start of the scale vector
      ConstPointer bias_pointer,             ///< Pointer to the start of the bias vector
      int thread_id                          ///< ID of each participating thread
      )
      : PredicatedScaleBiasVectorIterator(params, problem_size,
                                          scale_pointer, bias_pointer,
                                          thread_id, make_Coord(0, 0)) {}

  /// Overrides the internal iteration index.
  /// NOTE(review): forwards to UnderlyingIterator::set_iteration_index, which
  /// the pitch-linear specialization in this file does not declare; this
  /// member compiles only while unused — confirm whether it is dead code.
  CUTLASS_HOST_DEVICE
  void set_iteration_index(int index) { iterator_.set_iteration_index(index); }

  /// Advances an iterator along logical dimensions of matrix in units of whole
  /// threadblock tiles (row-major coordinates swapped into pitch-linear order)
  CUTLASS_HOST_DEVICE
  void add_tile_offset(TensorCoord const &tile_offset) {
    iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
  }

  /// Loads a fragment from memory
  CUTLASS_DEVICE
  void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
    iterator_.load_with_pointer_offset(frag, pointer_offset);
  }

  /// Loads a fragment from memory
  CUTLASS_DEVICE
  void load(Fragment &frag) {
    iterator_.load(frag);
  }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_iterator.h/0 | {
"file_path": "cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_iterator.h",
"repo_id": "cutlass",
"token_count": 4343
} | 24 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/epilogue/fusion/callbacks.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Used to specify epilogue subtile shape or dispatch to automatic computation of subtile shape
struct EpilogueTileAuto {};

// Used to let the builder pick the epilogue schedule automatically.
// Can be overridden with kernel schedule tags in cutlass/gemm/dispatch_policy.hpp
struct EpilogueScheduleAuto {};

// Im2col (convolution) counterpart of EpilogueScheduleAuto — lets the builder
// pick the schedule automatically (inferred from the name; verify against the
// builder specializations that consume it)
struct EpilogueIm2ColScheduleAuto {};
/// Primary template for building a collective epilogue from dispatch-level
/// parameters. Supported combinations are handled by specializations (see
/// builders/sm90_builder.inl, included at the bottom of this file); this
/// primary template fires a static_assert for anything unmatched.
template <
  class ArchTag,            // target architecture tag
  class OpClass,            // operator class
  class TileShape_MNK,      // CTA tile shape
  class ClusterShape_MNK,   // thread-block cluster shape
  class EpilogueTileType,   // epilogue subtile shape, or EpilogueTileAuto
  class ElementAccumulator,
  class ElementCompute,
  class ElementC,           // source (C) element type
  class GmemLayoutTagC,
  int AlignmentC,
  class ElementD,           // destination (D) element type
  class GmemLayoutTagD,
  int AlignmentD,
  class EpilogueScheduleType,   // schedule tag, or EpilogueScheduleAuto
  class FusionOpOrCallbacks = cutlass::epilogue::fusion::LinearCombination<ElementD,ElementCompute,ElementC,ElementCompute>,
  class Enable = void       // SFINAE hook for specializations
>
struct CollectiveBuilder {
  // Reached only when no specialization matches the given parameters
  static_assert(cutlass::detail::dependent_false<ArchTag>,
      "Could not build a collective epilogue for given parameters.");
};
// helper sub-builder for epilogue fusion callbacks (for internal use by CollectiveBuilder only)
namespace detail {

// Primary case: FusionOp is a fusion operation tag; wrap it in the standard
// FusionCallbacks adaptor for the given dispatch policy and tile shapes.
template<
  class DispatchPolicy,
  class FusionOp,
  class TileShape_MNK,
  class EpilogueTile_MN,
  class ElementAccumulator,
  class = void
>
struct CallbacksBuilder {
  using Callbacks = fusion::FusionCallbacks<DispatchPolicy, FusionOp, TileShape_MNK, EpilogueTile_MN>;
};

// Passthrough case: the caller supplied a complete callbacks type (anything
// NOT derived from fusion::FusionOperation); use it unchanged.
template <
  class DispatchPolicy,
  class FusionCallbacks,
  class TileShape_MNK,
  class EpilogueTile_MN,
  class ElementAccumulator
>
struct CallbacksBuilder<
  DispatchPolicy,
  FusionCallbacks,
  TileShape_MNK,
  EpilogueTile_MN,
  ElementAccumulator,
  cute::enable_if_t<not is_base_of_v<fusion::FusionOperation, FusionCallbacks>>
> {
  using Callbacks = FusionCallbacks;
};

} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "builders/sm90_builder.inl"
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/epilogue/collective/collective_builder.hpp/0 | {
"file_path": "cutlass/include/cutlass/epilogue/collective/collective_builder.hpp",
"repo_id": "cutlass",
"token_count": 1142
} | 25 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Visitor tree operation base implementation to enable composable fusions
for the sm90 TMA warp-specialized (ws) epilogue
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/workspace.h"
#include "cute/tensor.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
using namespace cute;
using cute::tuple;
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partitioning Helpers
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partitions a CTA-tile-shaped tensor across the threads of a tiled copy,
/// subdivided into epilogue subtiles.
template <
  bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
  class CtaTileMN,
  class EpilogueTile,
  class TiledCopy
>
CUTLASS_HOST_DEVICE
constexpr auto
sm90_partition_for_epilogue(
    CtaTileMN cT,             // (CTA_M,CTA_N,...)
    EpilogueTile epi_tile,    // (EPI_TILE_M,EPI_TILE_N)
    TiledCopy tiled_copy,
    int thread_idx) {
  ThrCopy thread_copy = tiled_copy.get_thread_slice(thread_idx);

  // Subdivide the CTA tile into epilogue subtiles
  Tensor cT_epi = flat_divide(cT, epi_tile);                             // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N,...)

  // Partition on the source or destination side of the copy, as requested
  if constexpr (ReferenceSrc) {
    return thread_copy.partition_S(cT_epi);                              // (CPY,CPY_M,CPY_N,EPI_M,EPI_N,...)
  }
  else {
    return thread_copy.partition_D(cT_epi);                              // (CPY,CPY_M,CPY_N,EPI_M,EPI_N,...)
  }
}
/// Overload for a full (M,N,L) tensor: slices out this CTA's (m,n,l) tile,
/// then partitions it across threads via the CTA-tile overload above.
template <
  bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
  class Engine, class LayoutMNL,
  class TileShapeMNK,
  class TileCoordMNKL,
  class EpilogueTile,
  class TiledCopy
>
CUTLASS_HOST_DEVICE
constexpr auto
sm90_partition_for_epilogue(
    Tensor<Engine, LayoutMNL> mT,     // (M,N,L)
    TileShapeMNK tile_shape_mnk,      // (CTA_M,CTA_N,CTA_K)
    TileCoordMNKL tile_coord_mnkl,    // (m,n,k,l)
    EpilogueTile epi_tile,            // (EPI_TILE_M,EPI_TILE_N)
    TiledCopy tiled_copy,
    int thread_idx) {
  // k is unused: the epilogue tile is addressed by (m, n, l) only
  auto [m, n, k, l] = tile_coord_mnkl;
  auto coord_shape =
      make_coord(m, n, l)
    ;

  // Slice this CTA's (CTA_M,CTA_N) tile out of the full tensor
  Tensor cT = local_tile(mT, take<0,2>(tile_shape_mnk), coord_shape);    // (CTA_M,CTA_N)
  Tensor tCcT =
    sm90_partition_for_epilogue<ReferenceSrc>(cT, epi_tile, tiled_copy, thread_idx); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)

  return tCcT;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Visitor Implementation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Per-work-tile context handed to each visitor node's producer-load path.
/// Plain aggregate-with-constructor; all members are copies of the caller's values.
template<
  class ProblemShapeMNKL,
  class TileShapeMNK,
  class TileCoordMNKL,
  class ResidueMN,
  class EpilogueTile
>
struct ProducerLoadArgs {
  ProblemShapeMNKL problem_shape_mnkl;  // full problem shape (M,N,K,L)
  TileShapeMNK tile_shape_mnk;          // CTA tile shape (CTA_M,CTA_N,CTA_K)
  TileCoordMNKL tile_coord_mnkl;        // this CTA's tile coordinate (m,n,k,l)
  ResidueMN residue_mn;                 // residue (remaining valid extent) in M/N
  EpilogueTile epi_tile;                // epilogue subtile shape
  int thread_idx;                       // index of the calling thread

  CUTLASS_DEVICE
  ProducerLoadArgs(
      ProblemShapeMNKL problem_shape_mnkl,
      TileShapeMNK tile_shape_mnk,
      TileCoordMNKL tile_coord_mnkl,
      ResidueMN residue_mn,
      EpilogueTile epi_tile,
      int thread_idx)
    : problem_shape_mnkl(problem_shape_mnkl),
      tile_shape_mnk(tile_shape_mnk),
      tile_coord_mnkl(tile_coord_mnkl),
      residue_mn(residue_mn),
      epi_tile(epi_tile),
      thread_idx(thread_idx) {}
};
/// Per-work-tile context handed to each visitor node's consumer-store path.
/// Extends the producer-load context with the tiled copy and the thread's
/// partitioned coordinate/source tensors.
template<
  class ProblemShapeMNKL,
  class TileShapeMNK,
  class TileCoordMNKL,
  class ResidueMN,
  class EpilogueTile,
  class TiledCopy,
  class CoordTensor,
  class ThrCoordTensor,
  class ThrSrcTensor
>
struct ConsumerStoreArgs {
  ProblemShapeMNKL problem_shape_mnkl;  // full problem shape (M,N,K,L)
  TileShapeMNK tile_shape_mnk;          // CTA tile shape (CTA_M,CTA_N,CTA_K)
  TileCoordMNKL tile_coord_mnkl;        // this CTA's tile coordinate (m,n,k,l)
  ResidueMN residue_mn;                 // residue (remaining valid extent) in M/N
  EpilogueTile epi_tile;                // epilogue subtile shape
  TiledCopy tiled_copy;                 // copy atom tiling used for partitioning
  int thread_idx;                       // index of the calling thread
  CoordTensor cD;                       // CTA-tile coordinate tensor
  ThrCoordTensor tCcD;                  // thread-partitioned coordinate tensor
  ThrSrcTensor const& tCrC;             // thread-partitioned source (C) fragment; held by reference

  CUTLASS_DEVICE
  ConsumerStoreArgs(
      ProblemShapeMNKL problem_shape_mnkl,
      TileShapeMNK tile_shape_mnk,
      TileCoordMNKL tile_coord_mnkl,
      ResidueMN residue_mn,
      EpilogueTile epi_tile,
      TiledCopy tiled_copy,
      int thread_idx,
      CoordTensor cD,
      ThrCoordTensor tCcD,
      ThrSrcTensor const& tCrC)
    : problem_shape_mnkl(problem_shape_mnkl),
      tile_shape_mnk(tile_shape_mnk),
      tile_coord_mnkl(tile_coord_mnkl),
      residue_mn(residue_mn),
      epi_tile(epi_tile),
      tiled_copy(tiled_copy),
      thread_idx(thread_idx),
      cD(cD),
      tCcD(tCcD),
      tCrC(tCrC) {}
};
/// Base for visitor-tree fusion implementations: holds a tuple of visitor ops
/// and fans each kernel-API call (argument conversion, workspace sizing and
/// initialization, construction) out to every op in declaration order.
template <class... Ops>
struct Sm90VisitorImplBase {
  // Shared memory allocation: one slot per op, in declaration order
  using SharedStorage = tuple<typename Ops::SharedStorage...>;

  // Host side fusion arguments
  using Arguments = tuple<typename Ops::Arguments...>;

  // Device side fusion params (Kernel-entry API)
  using Params = tuple<typename Ops::Params...>;

  /// Converts each op's host Arguments to device Params, carving the shared
  /// workspace into per-op regions rounded to MinWorkspaceAlignment. The
  /// carving order must match get_workspace_size / initialize_workspace.
  template <class ProblemShape>
  static constexpr Params
  to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
    uint8_t* op_workspace = reinterpret_cast<uint8_t*>(workspace);
    return transform_apply(tuple<Ops...>{}, args,
      // Map each (op, op_args) pair to that op's Params, advancing the cursor
      [&] (auto&& op, auto const& op_args) {
        using Op = cute::remove_cvref_t<decltype(op)>;
        auto ret = Op::to_underlying_arguments(problem_shape, op_args, op_workspace);
        if (op_workspace != nullptr) {
          size_t op_workspace_size = Op::get_workspace_size(problem_shape, op_args);
          op_workspace += round_nearest(op_workspace_size, MinWorkspaceAlignment);
        }
        return ret;
      },
      [] (auto&&... op_params) { return cute::make_tuple(op_params...); }
    );
  }

  /// Total workspace bytes: sum over ops of each requirement rounded up to
  /// MinWorkspaceAlignment (mirrors the carving above).
  template <class ProblemShape>
  static size_t
  get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
    return transform_apply(tuple<Ops...>{}, args,
      [&] (auto&& op, auto const& op_args) {
        using Op = cute::remove_cvref_t<decltype(op)>;
        size_t op_workspace_size = Op::get_workspace_size(problem_shape, op_args);
        return round_nearest(op_workspace_size, MinWorkspaceAlignment);
      },
      [&] (auto&&... op_workspace_size) {
        return (0 + ... + op_workspace_size);
      }
    );
  }

  /// Initializes each op's workspace region in order; after the first failure
  /// the remaining ops are skipped and that status is returned.
  template <class ProblemShape>
  static cutlass::Status
  initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
    CudaHostAdapter* cuda_adapter = nullptr) {
    Status status = Status::kSuccess;
    uint8_t* op_workspace = reinterpret_cast<uint8_t*>(workspace);
    return transform_apply(tuple<Ops...>{}, args,
      // Initialize each operation's workspace, stopping at the first error
      [&] (auto&& op, auto const& op_args) {
        if (status != Status::kSuccess) {
          return status;
        }

        using Op = cute::remove_cvref_t<decltype(op)>;
        status = Op::initialize_workspace(problem_shape, op_args, op_workspace, stream, cuda_adapter);
        if (op_workspace != nullptr) {
          size_t op_workspace_size = Op::get_workspace_size(problem_shape, op_args);
          op_workspace += round_nearest(op_workspace_size, MinWorkspaceAlignment);
        }
        return status;
      },
      // Return the final status
      [&] (auto const&...ops) { return status; }
    );
  }

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase() {}

  /// Constructs each op from its matching Params and SharedStorage slots.
  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
    : ops(transform_apply(tuple<Ops...>{}, params, shared_storage,
        [] (auto&& op, auto const& op_params, auto&& op_storage) {
          using Op = cute::remove_cvref_t<decltype(op)>;
          return Op(op_params, op_storage);
        },
        [] (auto&&... ops) { return cute::make_tuple(ops...); }
      )) {}

  // Ops can store kernel persistent variables (e.g. descriptors, scalars, wave counters)
  tuple<Ops...> ops;
};
// Runtime-facing visitor interface: wraps the aggregated ops and exposes the
// producer-load / consumer-store callback factories the collective epilogue calls.
template <class... Ops>
struct Sm90VisitorImpl : Sm90VisitorImplBase<Ops...> {

  using Impl = Sm90VisitorImplBase<Ops...>;
  using Params = typename Impl::Params;
  using SharedStorage = typename Impl::SharedStorage;

  CUTLASS_HOST_DEVICE
  Sm90VisitorImpl() {}

  CUTLASS_HOST_DEVICE
  Sm90VisitorImpl(Params const& params, SharedStorage const& shared_storage)
    : Impl(params, shared_storage) {}

  using Impl::ops;

  //
  // Queries for kernel runtime
  //

  // Is a specialized warp for producer TMA loads needed
  // e.g. Aux tensor loads, broadcasts using TMA bulk copy
  // This condition cannot change between work tiles because it is used
  // to determine whether the load warp should exit early or not
  // e.g. for batched beta this must always be true regardless of current batch idx
  CUTLASS_DEVICE bool
  is_producer_load_needed() const {
    // True if any op needs a producer load (fold over all ops)
    return cute::apply(ops,
      [] (auto const&... op) {
        return (false || ... || op.is_producer_load_needed());
      }
    );
  }

  // Is a producer TMA load specifically for C needed
  // If this is true then is_producer_load_needed must also be true
  // This condition can change between work tiles because it is only used
  // to determine whether the TMA and smem loads for C of a given tile should happen
  // e.g. for batched beta this can be false depending on current batch idx
  CUTLASS_DEVICE bool
  is_C_load_needed() const {
    return cute::apply(ops,
      [] (auto const&... op) {
        return (false || ... || op.is_C_load_needed());
      }
    );
  }

  //
  // Producer load callbacks, called by the epilogue load warp.
  // Operations usually only define this if TMA load is needed. Most operations will reuse this empty implementation
  // Load callbacks are responsible for issuing corresponding mbarrier expect-tx ops for any TMA loads issued, but
  // are not responsible for issuing the producer_commit barrier arrival, which is issued by the collective instead
  // If this is non-empty, is_producer_load_needed must be true.
  //
  template <class CallbacksTuple>
  struct ProducerLoadCallbacks {
    // Callbacks can store non-persistent variables (e.g. tensors) or copies of persistent variables
    CallbacksTuple callbacks_tuple;

    // Before entry of the subtile load loop. Bulk copies usually performed here.
    // Upon entry the producer_acquire of the first subtile lock has completed.
    // full_mbarrier_ptr is the corresponding barrier for the subsequent producer_commit arrival
    CUTLASS_DEVICE void
    begin(uint64_t* full_mbarrier_ptr, int load_iteration, bool issue_tma_load) {
      for_each(callbacks_tuple,
        [&] (auto& callbacks) {
          callbacks.begin(full_mbarrier_ptr, load_iteration, issue_tma_load);
        }
      );
    }

    // Entry of the subtile load loop. Aux loads usually performed here
    // Upon entry the producer acquire of the current subtile lock has completed.
    // Upon exit all TMA loads for this subtile must have been issued, with corresponding expect-tx operations
    CUTLASS_DEVICE void
    step(uint64_t* full_mbarrier_ptr, int epi_m, int epi_n, int load_iteration, bool issue_tma_load) {
      for_each(callbacks_tuple,
        [&] (auto& callbacks) {
          callbacks.step(full_mbarrier_ptr, epi_m, epi_n, load_iteration, issue_tma_load);
        }
      );
    }

    // Exit of the subtile load loop.
    CUTLASS_DEVICE void
    end() {
      for_each(callbacks_tuple,
        [] (auto& callbacks) {
          callbacks.end();
        }
      );
    }
  };

  // Producer load callbacks factory
  // All operations must redefine this, but most can just dispatch to the base impl
  template <class... Args>
  CUTLASS_DEVICE auto
  get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
    return transform_apply(ops,
      [&] (auto& op) {
        return op.get_producer_load_callbacks(args);
      },
      [] (auto&&... callbacks) {
        // Aggregate the per-op callbacks into a single dispatching object
        auto callbacks_tuple = cute::make_tuple(callbacks...);
        return ProducerLoadCallbacks<decltype(callbacks_tuple)>{callbacks_tuple};
      }
    );
  }

  //
  // Consumer store callbacks, called by the epilogue store warps.
  // All operations must redefine this, with optional inheritance from this empty implementation.
  //
  template <class CallbacksTuple>
  struct ConsumerStoreCallbacks {
    // Callbacks can store non-persistent variables (e.g. tensors) or copies of persistent variables
    CallbacksTuple callbacks_tuple;

    // Before entry of subtile store loop. Gmem broadcasts usually performed here.
    CUTLASS_DEVICE void
    begin() {
      for_each(callbacks_tuple,
        [] (auto& callbacks) {
          callbacks.begin();
        }
      );
    }

    // Start of subtile store iteration. Smem broadcasts usually performed here.
    // Upon entry, all producer loads for this subtile are completed and visible.
    CUTLASS_DEVICE void
    previsit(int epi_m, int epi_n, int load_iteration, bool is_producer_load_needed) {
      for_each(callbacks_tuple,
        [&] (auto& callbacks) {
          callbacks.previsit(epi_m, epi_n, load_iteration, is_producer_load_needed);
        }
      );
    }

    // Perform the fused elementwise computation
    template <typename ElementAccumulator, typename... ElementInputs, int FragmentSize>
    CUTLASS_DEVICE auto // returns an Array
    visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
          Array<ElementInputs, FragmentSize> const&... frg_inputs) // depends on the N-naryness of the op
      = delete; // Must be implemented for each operation

    // After visit call. Smem reductions usually performed here
    // reduction_buffer is an arbitrary smem tensor that can be used for workspace
    // It is each node's responsibility to assert that this buffer is sufficiently sized
    // and to ensure that this buffer is no longer needed upon callback exit
    // i.e. results are synchronized and no longer in the reduction buffer
    template <class STensor, class SyncFn>
    CUTLASS_DEVICE void
    reduce(STensor&& reduction_buffer, SyncFn const& sync_fn, int epi_m, int epi_n, bool is_last_iteration) {
      for_each(callbacks_tuple,
        [&] (auto& callbacks) {
          callbacks.reduce(reduction_buffer, sync_fn, epi_m, epi_n, is_last_iteration);
        }
      );
    }

    // After reduce call, before smem async fence. Smem stores usually performed here.
    // Upon exit, all smem stores for TMA must have been issued
    CUTLASS_DEVICE void
    postreduce(int epi_m, int epi_n, int store_iteration, bool issue_smem_store) {
      for_each(callbacks_tuple,
        [&] (auto& callbacks) {
          callbacks.postreduce(epi_m, epi_n, store_iteration, issue_smem_store);
        }
      );
    }

    // After smem async fence, before TMA store commit. Aux stores usually performed here
    // Upon exit, all TMA stores for this subtile must have been issued
    // Because of the TMA store delay optimization, this entry point must ONLY be used for TMA stores
    // other gmem stores can be placed in the reduce or postreduce entry points
    CUTLASS_DEVICE void
    tma_store(int epi_m, int epi_n, int store_iteration, bool issue_tma_store) {
      for_each(callbacks_tuple,
        [&] (auto& callbacks) {
          callbacks.tma_store(epi_m, epi_n, store_iteration, issue_tma_store);
        }
      );
    }

    // Exit of subtile store loop. Gmem reductions usually performed here.
    CUTLASS_DEVICE void
    end() {
      for_each(callbacks_tuple,
        [&] (auto& callbacks) {
          callbacks.end();
        }
      );
    }
  };

  // Consumer store callbacks factory
  // All operations must redefine this
  template <
    bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
    class... Args
  >
  CUTLASS_DEVICE auto
  get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
    return transform_apply(ops,
      [&] (auto& op) {
        return op.template get_consumer_store_callbacks<ReferenceSrc>(args);
      },
      [] (auto&&... callbacks) {
        auto callbacks_tuple = cute::make_tuple(callbacks...);
        return ConsumerStoreCallbacks<decltype(callbacks_tuple)>{callbacks_tuple};
      }
    );
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Convenience aliases
// Zero-op callback instantiations: handy base classes for ops that need no
// producer-load or consumer-store behavior of their own.
using EmptyProducerLoadCallbacks = Sm90VisitorImpl<>::ProducerLoadCallbacks<cute::tuple<>>;
using EmptyConsumerStoreCallbacks = Sm90VisitorImpl<>::ConsumerStoreCallbacks<cute::tuple<>>;
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
using namespace detail;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Tree visitor
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// Tree fusion: the child ops are evaluated first (each must be nullary, i.e.
// depend only on the accumulator fragment), then their results are passed as
// inputs to the node op. Ops are stored child-ops-first, node op last.
template <class NodeOp, class... ChildOps>
struct Sm90TreeVisitor : Sm90VisitorImpl<ChildOps..., NodeOp> {

  using Impl = Sm90VisitorImpl<ChildOps..., NodeOp>;
  using Params = typename Impl::Params;
  using SharedStorage = typename Impl::SharedStorage;

  CUTLASS_HOST_DEVICE
  Sm90TreeVisitor() {}

  CUTLASS_HOST_DEVICE
  Sm90TreeVisitor(
      Params const& params,
      SharedStorage const& shared_storage)
    : Impl(params, shared_storage) {}

  template<class CallbacksImpl>
  struct ConsumerStoreCallbacks : CallbacksImpl {
    CUTLASS_DEVICE
    ConsumerStoreCallbacks(CallbacksImpl&& impl)
      : CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}

    using CallbacksImpl::callbacks_tuple;

    // Visit children, then feed their fragments to the node op's visit
    template <typename ElementAccumulator, int FragmentSize>
    CUTLASS_DEVICE auto
    visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
      constexpr int Rm1 = sizeof...(ChildOps);
      return cute::detail::tapply(callbacks_tuple,
        [&] (auto& child_callbacks) {
          return child_callbacks.visit(frg_acc, epi_v, epi_m, epi_n); // child ops must be nullary (e.g. loads, trees)
        },
        [&] (auto&&... frg_inputs) {
          return get<Rm1>(callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n, frg_inputs...);
        },
        make_seq<Rm1>{} // restrict the transform to R-1 child ops, apply is for node op
      );
    }
  };

  template <
    bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
    class... Args
  >
  CUTLASS_DEVICE auto
  get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
    auto callbacks_tuple = Sm90VisitorImpl<ChildOps..., NodeOp>::
      template get_consumer_store_callbacks<ReferenceSrc>(args);
    return ConsumerStoreCallbacks<decltype(callbacks_tuple)>(std::move(callbacks_tuple));
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// DAG visitors
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// Most DAG fusions can be represented as a set of output trees with a common input tree
// The common input is first evaluated, then the result is passed as the acc fragment to the output trees
template <class InputTree, class OutputTree, class... AuxOutTrees>
struct Sm90SplitTreeVisitor : Sm90VisitorImpl<InputTree, AuxOutTrees..., OutputTree> {
using Sm90VisitorImpl<InputTree, AuxOutTrees..., OutputTree>::Sm90VisitorImpl;
template<class CallbacksImpl>
struct ConsumerStoreCallbacks : CallbacksImpl {
CUTLASS_DEVICE
ConsumerStoreCallbacks(CallbacksImpl&& impl)
: CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}
using CallbacksImpl::callbacks_tuple;
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
Array frg_input = get<0>(callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n);
constexpr int Rm2 = sizeof...(AuxOutTrees);
cute::for_each(make_seq<Rm2>{}, // restrict the sequence to aux out trees
[&] (auto I) {
get<I+1>(callbacks_tuple).visit(frg_input, epi_v, epi_m, epi_n);
}
);
return get<Rm2+1>(callbacks_tuple).visit(frg_input, epi_v, epi_m, epi_n);
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
auto callbacks_tuple = Sm90VisitorImpl<InputTree, AuxOutTrees..., OutputTree>::
template get_consumer_store_callbacks<ReferenceSrc>(args);
return ConsumerStoreCallbacks<decltype(callbacks_tuple)>(std::move(callbacks_tuple));
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// General DAG fusion: ops are evaluated in topological order, each intermediate
// result converted to a common ElementCompute type so it can be stored in a
// homogeneous fragment tuple and routed to later ops via EdgeTuple indices.
template<
  // deducing the output type for all the nodes is tricky so we just convert them all to a common type
  // if multiple compute types are needed then split into multiple subgraphs grouped by type
  class ElementCompute,
  class EdgeTuple, // tuple of int_sequence, each sequence is the children indices (indexed by topological order) for each node
  class... Ops // in topological order, last op is the output. EdgeTuple must match this order
>
struct Sm90TopologicalVisitor : Sm90VisitorImpl<Ops...> {
  static_assert(is_static_v<EdgeTuple>);
  static_assert(cute::rank(EdgeTuple{}) == sizeof...(Ops));
  static_assert(sizeof...(Ops) > 1);

  using Sm90VisitorImpl<Ops...>::Sm90VisitorImpl;

  template<class CallbacksImpl>
  struct ConsumerStoreCallbacks : CallbacksImpl {
    CUTLASS_DEVICE
    ConsumerStoreCallbacks(CallbacksImpl&& impl)
      : CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}

    using CallbacksImpl::callbacks_tuple;

    template <typename ElementAccumulator, int FragmentSize>
    CUTLASS_DEVICE auto
    visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
      constexpr int Rm1 = sizeof...(Ops) - 1;
      // One ElementCompute fragment slot per non-terminal op
      auto frg_compute_tuple = cute::repeat<Rm1>(Array<ElementCompute, FragmentSize>{});

      return cute::detail::tapply(EdgeTuple{}, callbacks_tuple, frg_compute_tuple,
        // Visit the first R-1 ops in topological order
        [&] (auto&& edge_seq, auto& callbacks, auto& frg_compute) {
          frg_compute = cute::detail::apply(frg_compute_tuple,
            // Compute the current op with children inputs
            [&] (auto const&... frg_inputs) {
              auto frg_output = callbacks.visit(frg_acc, epi_v, epi_m, epi_n, frg_inputs...);
              using ElementOutput = typename decltype(frg_output)::Element;
              using ConvertOutput = NumericArrayConverter<ElementCompute, ElementOutput, FragmentSize>;
              ConvertOutput convert_output{};

              return convert_output(frg_output);
            },
            // Get inputs in the sequence given by the children indices of the current op
            edge_seq
          );
          return frg_compute; // unused
        },
        // Visit the last op
        [&] (auto const&...ops) {
          return cute::detail::apply(frg_compute_tuple,
            // Compute the last op with children inputs
            [&] (auto const&... frg_inputs) {
              return get<Rm1>(callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n, frg_inputs...);
            },
            // Get inputs in the sequence given by the children indices of the last op
            get<Rm1>(EdgeTuple{})
          );
        },
        // Transform to visit R-1 ops, apply to visit last op
        make_seq<Rm1>{}
      );
    }
  };

  template <
    bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
    class... Args
  >
  CUTLASS_DEVICE auto
  get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
    auto callbacks_tuple = Sm90VisitorImpl<Ops...>::
      template get_consumer_store_callbacks<ReferenceSrc>(args);
    return ConsumerStoreCallbacks<decltype(callbacks_tuple)>(std::move(callbacks_tuple));
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Base specializations so we can have standard layout params and simple aggregate initializers
namespace detail {
// Single-op specialization: standard-layout Arguments/Params aggregates so host
// code can brace-initialize them (the variadic primary uses tuples instead).
template <class Op0>
struct Sm90VisitorImplBase<Op0> {

  // SharedStorage stays a tuple rather than a plain struct: empty member structs
  // would each occupy 1B in an aggregate, whereas tuple's multiple-inheritance
  // layout lets empty storage collapse away.
  using SharedStorage = tuple<
    typename Op0::SharedStorage
  >;

  struct Arguments {
    typename Op0::Arguments op_0;
  };

  struct Params {
    typename Op0::Params op_0;
  };

  // Translate host Arguments into device Params; with a single op, it is
  // handed the workspace buffer as-is.
  template <class ProblemShape>
  static constexpr Params
  to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
    return Params{
      Op0::to_underlying_arguments(problem_shape, args.op_0, workspace)
    };
  }

  // Workspace footprint of the sole op, rounded up to the minimum alignment.
  template <class ProblemShape>
  static size_t
  get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
    return round_nearest(Op0::get_workspace_size(problem_shape, args.op_0), MinWorkspaceAlignment);
  }

  // Initialize the op's workspace region and report its status.
  template <class ProblemShape>
  static cutlass::Status
  initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
      CudaHostAdapter* cuda_adapter = nullptr) {
    uint8_t* base_ptr = reinterpret_cast<uint8_t*>(workspace);
    Status op_status = Op0::initialize_workspace(problem_shape, args.op_0, base_ptr, stream, cuda_adapter);
    return op_status;
  }

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase() {}

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
    : ops({
        Op0(params.op_0, get<0>(shared_storage))
      }) {}

  tuple<Op0> ops;
};
// Two-op specialization: standard-layout aggregates for Arguments/Params.
// Workspace is partitioned sequentially: op_0's slice first, then op_1's,
// each rounded up to MinWorkspaceAlignment.
template <class Op0, class Op1>
struct Sm90VisitorImplBase<Op0, Op1> {

  using SharedStorage = tuple<
    typename Op0::SharedStorage,
    typename Op1::SharedStorage
  >;

  struct Arguments {
    typename Op0::Arguments op_0;
    typename Op1::Arguments op_1;
  };

  struct Params {
    typename Op0::Params op_0;
    typename Op1::Params op_1;
  };

  template <class ProblemShape>
  static constexpr Params
  to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
    size_t op_0_workspace_size = Op0::get_workspace_size(problem_shape, args.op_0);
    uint8_t* op_0_workspace = reinterpret_cast<uint8_t*>(workspace);
    uint8_t* op_1_workspace = op_0_workspace + op_0_workspace_size;
    return Params{
      Op0::to_underlying_arguments(problem_shape, args.op_0, op_0_workspace),
      Op1::to_underlying_arguments(problem_shape, args.op_1, op_1_workspace)
    };
  }

  // Total footprint: sum of each op's requirement, alignment-rounded per op.
  template <class ProblemShape>
  static size_t
  get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
    size_t workspace_size = 0;
    workspace_size += Op0::get_workspace_size(problem_shape, args.op_0);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    workspace_size += Op1::get_workspace_size(problem_shape, args.op_1);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    return workspace_size;
  }

  // Initialize each op's slice in order, stopping at the first failure.
  template <class ProblemShape>
  static cutlass::Status
  initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
      CudaHostAdapter* cuda_adapter = nullptr) {
    Status status = Status::kSuccess;
    uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
    size_t workspace_offset = 0;

    status = Op0::initialize_workspace(problem_shape, args.op_0, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op0::get_workspace_size(problem_shape, args.op_0);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    status = Op1::initialize_workspace(problem_shape, args.op_1, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op1::get_workspace_size(problem_shape, args.op_1);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    return status;
  }

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase() {}

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
    : ops({
        Op0(params.op_0, get<0>(shared_storage)),
        Op1(params.op_1, get<1>(shared_storage))
      }) {}

  tuple<Op0, Op1> ops;
};
// Three-op specialization; same sequential workspace partitioning scheme as above.
template <class Op0, class Op1, class Op2>
struct Sm90VisitorImplBase<Op0, Op1, Op2> {

  using SharedStorage = tuple<
    typename Op0::SharedStorage,
    typename Op1::SharedStorage,
    typename Op2::SharedStorage
  >;

  struct Arguments {
    typename Op0::Arguments op_0;
    typename Op1::Arguments op_1;
    typename Op2::Arguments op_2;
  };

  struct Params {
    typename Op0::Params op_0;
    typename Op1::Params op_1;
    typename Op2::Params op_2;
  };

  template <class ProblemShape>
  static constexpr Params
  to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
    size_t op_0_workspace_size = Op0::get_workspace_size(problem_shape, args.op_0);
    size_t op_1_workspace_size = Op1::get_workspace_size(problem_shape, args.op_1);
    uint8_t* op_0_workspace = reinterpret_cast<uint8_t*>(workspace);
    uint8_t* op_1_workspace = op_0_workspace + op_0_workspace_size;
    uint8_t* op_2_workspace = op_1_workspace + op_1_workspace_size;
    return Params{
      Op0::to_underlying_arguments(problem_shape, args.op_0, op_0_workspace),
      Op1::to_underlying_arguments(problem_shape, args.op_1, op_1_workspace),
      Op2::to_underlying_arguments(problem_shape, args.op_2, op_2_workspace)
    };
  }

  template <class ProblemShape>
  static size_t
  get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
    size_t workspace_size = 0;
    workspace_size += Op0::get_workspace_size(problem_shape, args.op_0);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    workspace_size += Op1::get_workspace_size(problem_shape, args.op_1);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    workspace_size += Op2::get_workspace_size(problem_shape, args.op_2);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    return workspace_size;
  }

  // Initialize each op's slice in order, stopping at the first failure.
  template <class ProblemShape>
  static cutlass::Status
  initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
      CudaHostAdapter* cuda_adapter = nullptr) {
    Status status = Status::kSuccess;
    uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
    size_t workspace_offset = 0;

    status = Op0::initialize_workspace(problem_shape, args.op_0, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op0::get_workspace_size(problem_shape, args.op_0);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    status = Op1::initialize_workspace(problem_shape, args.op_1, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op1::get_workspace_size(problem_shape, args.op_1);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    status = Op2::initialize_workspace(problem_shape, args.op_2, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op2::get_workspace_size(problem_shape, args.op_2);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    return status;
  }

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase() {}

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
    : ops({
        Op0(params.op_0, get<0>(shared_storage)),
        Op1(params.op_1, get<1>(shared_storage)),
        Op2(params.op_2, get<2>(shared_storage))
      }) {}

  tuple<Op0, Op1, Op2> ops;
};
// Four-op specialization; same sequential workspace partitioning scheme as above.
template <class Op0, class Op1, class Op2, class Op3>
struct Sm90VisitorImplBase<Op0, Op1, Op2, Op3> {

  using SharedStorage = tuple<
    typename Op0::SharedStorage,
    typename Op1::SharedStorage,
    typename Op2::SharedStorage,
    typename Op3::SharedStorage
  >;

  struct Arguments {
    typename Op0::Arguments op_0;
    typename Op1::Arguments op_1;
    typename Op2::Arguments op_2;
    typename Op3::Arguments op_3;
  };

  struct Params {
    typename Op0::Params op_0;
    typename Op1::Params op_1;
    typename Op2::Params op_2;
    typename Op3::Params op_3;
  };

  template <class ProblemShape>
  static constexpr Params
  to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
    size_t op_0_workspace_size = Op0::get_workspace_size(problem_shape, args.op_0);
    size_t op_1_workspace_size = Op1::get_workspace_size(problem_shape, args.op_1);
    size_t op_2_workspace_size = Op2::get_workspace_size(problem_shape, args.op_2);
    uint8_t* op_0_workspace = reinterpret_cast<uint8_t*>(workspace);
    uint8_t* op_1_workspace = op_0_workspace + op_0_workspace_size;
    uint8_t* op_2_workspace = op_1_workspace + op_1_workspace_size;
    uint8_t* op_3_workspace = op_2_workspace + op_2_workspace_size;
    return Params{
      Op0::to_underlying_arguments(problem_shape, args.op_0, op_0_workspace),
      Op1::to_underlying_arguments(problem_shape, args.op_1, op_1_workspace),
      Op2::to_underlying_arguments(problem_shape, args.op_2, op_2_workspace),
      Op3::to_underlying_arguments(problem_shape, args.op_3, op_3_workspace)
    };
  }

  template <class ProblemShape>
  static size_t
  get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
    size_t workspace_size = 0;
    workspace_size += Op0::get_workspace_size(problem_shape, args.op_0);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    workspace_size += Op1::get_workspace_size(problem_shape, args.op_1);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    workspace_size += Op2::get_workspace_size(problem_shape, args.op_2);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    workspace_size += Op3::get_workspace_size(problem_shape, args.op_3);
    workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);

    return workspace_size;
  }

  // Initialize each op's slice in order, stopping at the first failure.
  template <class ProblemShape>
  static cutlass::Status
  initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
      CudaHostAdapter* cuda_adapter = nullptr) {
    Status status = Status::kSuccess;
    uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
    size_t workspace_offset = 0;

    status = Op0::initialize_workspace(problem_shape, args.op_0, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op0::get_workspace_size(problem_shape, args.op_0);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    status = Op1::initialize_workspace(problem_shape, args.op_1, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op1::get_workspace_size(problem_shape, args.op_1);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    status = Op2::initialize_workspace(problem_shape, args.op_2, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op2::get_workspace_size(problem_shape, args.op_2);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    status = Op3::initialize_workspace(problem_shape, args.op_3, workspace_ptr + workspace_offset, stream, cuda_adapter);
    workspace_offset += Op3::get_workspace_size(problem_shape, args.op_3);
    workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    if (status != Status::kSuccess) {
      return status;
    }

    return status;
  }

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase() {}

  CUTLASS_HOST_DEVICE
  Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
    : ops({
        Op0(params.op_0, get<0>(shared_storage)),
        Op1(params.op_1, get<1>(shared_storage)),
        Op2(params.op_2, get<2>(shared_storage)),
        Op3(params.op_3, get<3>(shared_storage))
      }) {}

  tuple<Op0, Op1, Op2, Op3> ops;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
| cutlass/include/cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp/0 | {
"file_path": "cutlass/include/cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp",
"repo_id": "cutlass",
"token_count": 14463
} | 26 |